Small refactorings

* use uint instead of uint32 in packs/indexes
* use ID.Str() for debug messages
 * add ParallelIDWorkFunc
Alexander Neumann 2015-08-08 13:47:08 +02:00
parent 2cb0fbf589
commit d42ff509ba
3 changed files with 25 additions and 8 deletions


@@ -57,7 +57,7 @@ func (t *BlobType) UnmarshalJSON(buf []byte) error {
 // Blob is a blob within a pack.
 type Blob struct {
     Type   BlobType
-    Length uint32
+    Length uint
     ID     backend.ID
     Offset uint
 }
@@ -100,7 +100,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
     c := Blob{Type: t, ID: id}

     n, err := io.Copy(p.hw, rd)
-    c.Length = uint32(n)
+    c.Length = uint(n)
     c.Offset = p.bytes
     p.bytes += uint(n)
     p.blobs = append(p.blobs, c)
@@ -164,7 +164,7 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
     for _, b := range p.blobs {
         entry := headerEntry{
             Type:   b.Type,
-            Length: b.Length,
+            Length: uint32(b.Length),
             ID:     b.ID,
         }
@@ -276,7 +276,7 @@ func NewUnpacker(k *crypto.Key, entries []Blob, rd io.ReadSeeker) (*Unpacker, er
         entries = append(entries, Blob{
             Type:   e.Type,
-            Length: e.Length,
+            Length: uint(e.Length),
             ID:     e.ID,
             Offset: pos,
         })
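Note on the pack hunks above: blob lengths are now held as a platform-sized uint in memory (Blob.Length), while the serialized header entry keeps a fixed-width uint32; writeHeader narrows with uint32(b.Length) and NewUnpacker widens back with uint(e.Length). Below is a minimal, self-contained sketch of that conversion boundary, not restic code; all type and function names other than Length are made up for illustration.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// memBlob stands in for the in-memory record: Length is a plain uint.
type memBlob struct {
    Length uint
}

// diskEntry stands in for the fixed-width header entry that gets serialized.
type diskEntry struct {
    Length uint32
}

// writeEntry narrows uint to uint32 at the serialization boundary,
// mirroring the uint32(b.Length) conversion in writeHeader.
func writeEntry(b memBlob, wr *bytes.Buffer) error {
    return binary.Write(wr, binary.LittleEndian, diskEntry{Length: uint32(b.Length)})
}

// readEntry widens uint32 back to uint, mirroring uint(e.Length) in NewUnpacker.
func readEntry(rd *bytes.Buffer) (memBlob, error) {
    var e diskEntry
    if err := binary.Read(rd, binary.LittleEndian, &e); err != nil {
        return memBlob{}, err
    }
    return memBlob{Length: uint(e.Length)}, nil
}

func main() {
    buf := &bytes.Buffer{}
    if err := writeEntry(memBlob{Length: 42}, buf); err != nil {
        panic(err)
    }
    b, err := readEntry(buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(b.Length) // prints 42
}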


@@ -109,7 +109,7 @@ func (idx *Index) Merge(other *Index) {
     for k, v := range other.pack {
         if _, ok := idx.pack[k]; ok {
-            debug.Log("Index.Merge", "index already has key %v, updating", k[:8])
+            debug.Log("Index.Merge", "index already has key %v, updating", k.Str())
         }

         idx.pack[k] = v
@@ -146,7 +146,7 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
                 ID:     id,
                 Offset: blob.offset,
                 Type:   blob.tpe,
-                Length: uint32(blob.length),
+                Length: blob.length,
             },
             PackID: *blob.packID,
         }:
@@ -166,7 +166,7 @@ func (idx *Index) Count(t pack.BlobType) (n uint) {
     for id, blob := range idx.pack {
         if blob.tpe == t {
             n++
-            debug.Log("Index.Count", " blob %v counted: %v", id[:8], blob)
+            debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob)
         }
     }
@@ -201,7 +201,7 @@ func (idx *Index) generatePackList(selectFn func(indexEntry) bool) ([]*packJSON,
         if blob.packID.IsNull() {
             debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)",
-                id[:8], blob.tpe, blob.offset, blob.length)
+                id.Str(), blob.tpe, blob.offset, blob.length)
             return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id)
         }


@@ -20,6 +20,10 @@ func closeIfOpen(ch chan struct{}) {
 // processing stops. If done is closed, the function should return.
 type ParallelWorkFunc func(id string, done <-chan struct{}) error

+// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned,
+// processing stops. If done is closed, the function should return.
+type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error
+
 // FilesInParallel runs n workers of f in parallel, on the IDs that
 // repo.List(t) yield. If f returns an error, the process is aborted and the
 // first error is returned.
@@ -69,3 +73,16 @@ func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWork
     return nil
 }
+
+// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
+// function that takes a string.
+func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
+    return func(s string, done <-chan struct{}) error {
+        id, err := backend.ParseID(s)
+        if err != nil {
+            return err
+        }
+
+        return f(id, done)
+    }
+}
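Taken together, ParallelIDWorkFunc and ParallelWorkFuncParseID let callers write their worker against backend.ID while FilesInParallel keeps yielding plain string names. A hedged usage sketch, assuming it lives in the same package with an fmt import; the function name and the choice of backend.Snapshot as the listed type are illustrative only:

// listSnapshotsInParallel is a hypothetical caller: it runs ten workers
// over the files listed for the given type, handing each a parsed backend.ID.
func listSnapshotsInParallel(repo backend.Lister) error {
    worker := func(id backend.ID, done <-chan struct{}) error {
        fmt.Printf("processing %v\n", id.Str())
        return nil
    }

    // ParallelWorkFuncParseID adapts the ID-based worker to the string-based
    // ParallelWorkFunc that FilesInParallel expects.
    return FilesInParallel(repo, backend.Snapshot, 10, ParallelWorkFuncParseID(worker))
}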