-
Notifications
You must be signed in to change notification settings - Fork 4
/
chunks.go
110 lines (90 loc) · 2.78 KB
/
chunks.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/*
Package chunks provides the basic structure for a pair of the weak and strong checksums.
Since this is fairly widely used, splitting this out breaks a number of possible circular dependencies
*/
package chunks
import (
"bytes"
"errors"
"io"
)
// ChunkChecksum holds, for a given block, the weak & strong hashes and the offset.
// This structure is only used to generate the index of reference files, since
// computing the strong checksum is not done when comparing unless the weak checksum matches.
type ChunkChecksum struct {
	// ChunkOffset is an offset in terms of chunk count, not bytes.
	ChunkOffset uint
	// Size is the size of the block.
	Size int64
	// WeakChecksum holds the weak hash bytes for the block.
	WeakChecksum []byte
	// StrongChecksum holds the strong hash bytes for the block.
	StrongChecksum []byte
}

// Match compares a checksum to another based on the checksums, not the offset.
// The strong checksum is only consulted when the weak checksums agree, which
// mirrors how comparisons are performed while syncing.
func (chunk ChunkChecksum) Match(other ChunkChecksum) bool {
	// bytes.Equal is the idiomatic (and cheaper) replacement for
	// bytes.Compare(a, b) == 0 (staticcheck S1004). The && short-circuit
	// preserves the original "skip strong compare on weak mismatch" behavior.
	return bytes.Equal(chunk.WeakChecksum, other.WeakChecksum) &&
		bytes.Equal(chunk.StrongChecksum, other.StrongChecksum)
}
var ErrPartialChecksum = errors.New("Reader length was not a multiple of the checksums")
// LoadChecksumsFromReaderLegacy loads chunks from a source, assuming alternating
// weak then strong hashes.
// This function attempts to be compatible with the original zsync implementation,
// therefore the following assumptions are made:
// - weak checksums are stored in Little Endian notation
// - only the first <strongHashSize> bytes of the strong checksum are provided
//
// It returns ErrPartialChecksum if the stream ends partway through a
// weak/strong checksum pair.
func LoadChecksumsFromReaderLegacy(
	r io.Reader,
	weakHashSize int,
	strongHashSize int,
) ([]ChunkChecksum, error) {
	result := make([]ChunkChecksum, 0, 20)

	// The weak buffer is safely reused across iterations because
	// TransformToInternalRepresentation copies it into a fresh slice;
	// the original allocated it on every loop pass.
	weakBuffer := make([]byte, weakHashSize)

	for offset := uint(0); ; offset++ {
		n, err := io.ReadFull(r, weakBuffer)
		if n == 0 && err == io.EOF {
			// Clean EOF on a pair boundary: all checksums consumed.
			break
		}
		if n != weakHashSize {
			// Short read mid-weak-checksum (e.g. io.ErrUnexpectedEOF).
			return nil, ErrPartialChecksum
		}

		// The strong buffer must be freshly allocated per chunk because it
		// is retained in the result.
		strongBuffer := make([]byte, strongHashSize)
		// io.ReadFull guarantees err == nil iff n == len(buf), so a full read
		// can never carry io.EOF — the original's break on that case was dead
		// code, and a single length check suffices here.
		if n, _ = io.ReadFull(r, strongBuffer); n != strongHashSize {
			return nil, ErrPartialChecksum
		}

		result = append(result, ChunkChecksum{
			ChunkOffset:    offset,
			WeakChecksum:   TransformToInternalRepresentation(weakBuffer),
			StrongChecksum: strongBuffer,
		})
	}

	return result, nil
}
// TransformToInternalRepresentation reverses the byte order of a weak
// checksum read from a legacy zsync stream into the fixed 4-byte form used
// internally. Required for zsync legacy support.
//
// NOTE(review): the output is always 4 bytes; inputs shorter than 4 bytes are
// zero-padded at the front, and inputs longer than 4 would panic — callers
// appear to pass 4-byte weak hashes.
func TransformToInternalRepresentation(inWeakBuffer []byte) []byte {
	out := make([]byte, 4)
	for i := len(inWeakBuffer) - 1; i >= 0; i-- {
		out[3-i] = inWeakBuffer[i]
	}
	return out
}
// StrongChecksumGetter satisfies filechecksum.ChecksumLookup by indexing
// straight into a slice of ChunkChecksums.
type StrongChecksumGetter []ChunkChecksum

// GetStrongChecksumForBlock returns the stored strong checksum for blockID.
// It panics on an out-of-range blockID, exactly like a plain slice index.
func (s StrongChecksumGetter) GetStrongChecksumForBlock(blockID int) []byte {
	chunk := s[blockID]
	return chunk.StrongChecksum
}
// ChunkInfo describes a chunk of data: its size, the reader it can be
// retrieved from, and where it sits in both the source and the target.
type ChunkInfo struct {
	// Size is the length of the chunk.
	Size int64
	// Source is the reader the chunk can be read from; being a ReadSeeker,
	// it can be positioned at SourceOffset before reading.
	Source io.ReadSeeker
	// SourceOffset is the chunk's offset within Source (a seek position).
	SourceOffset int64
	// TargetOffset is the offset at which the chunk belongs in the target.
	TargetOffset int64
}