/
undo.go
217 lines (182 loc) · 5.59 KB
/
undo.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
package accumulator
import (
"encoding/binary"
"fmt"
"io"
)
/* we need to be able to undo blocks! for bridge nodes at least.
compact nodes can just keep old roots.
although actually it can make sense for non-bridge nodes to undo as well...
*/
// TODO in general, deal with numLeaves going to 0
// UndoBlock is all the data needed to undo a block: the number of adds,
// and all the hashes that got deleted along with where they were.
type UndoBlock struct {
	Height    int32    // height of the block being undone
	numAdds   uint32   // number of adds in the block
	positions []uint64 // positions of all deletions this block
	hashes    []Hash   // hashes that were deleted
}
// ToString returns a human-readable summary of the undo block: the add
// count followed by each deleted position and the first 4 bytes of its
// hash.
func (u *UndoBlock) ToString() string {
	s := fmt.Sprintf("- uuuu undo block %d adds\t", u.numAdds)
	s += fmt.Sprintf("%d dels:\t", len(u.positions))
	// positions and hashes are parallel slices; a length mismatch means
	// the undo block is malformed, so bail out with a marker.
	if len(u.positions) != len(u.hashes) {
		s += "error"
		return s
	}
	// was `for i, _ := range`; the blank identifier is redundant
	for i := range u.positions {
		s += fmt.Sprintf("%d %x,\t", u.positions[i], u.hashes[i][:4])
	}
	s += "\n"
	return s
}
// SerializeSize returns the number of bytes this undoblock occupies
// once serialized with Serialize.
func (u *UndoBlock) SerializeSize() int {
	const (
		addCountSize = 4  // u.numAdds, a uint32
		lenPrefix    = 8  // each slice-length prefix, a uint64
		positionSize = 8  // one deletion position, a uint64
		hashSize     = 32 // one deleted hash
	)
	total := addCountSize
	total += lenPrefix + positionSize*len(u.positions)
	total += lenPrefix + hashSize*len(u.hashes)
	return total
}
// Serialize encodes the undoblock into the given writer: numAdds
// (uint32), the positions prefixed by their count, then the hashes
// prefixed by their count, all integers big-endian.
func (u *UndoBlock) Serialize(w io.Writer) error {
	if err := binary.Write(w, binary.BigEndian, u.numAdds); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint64(len(u.positions))); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, u.positions); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint64(len(u.hashes))); err != nil {
		return err
	}
	// Hashes go out raw, 32 bytes apiece.
	for _, hash := range u.hashes {
		n, err := w.Write(hash[:])
		if err != nil {
			return err
		}
		if n != 32 {
			return fmt.Errorf("UndoBlock Serialize supposed to write 32 bytes but wrote %d bytes", n)
		}
	}
	return nil
}
// Deserialize decodes an undoblock from the reader.
//
// The wire format mirrors Serialize: numAdds (uint32), a uint64 count
// followed by that many uint64 positions, then a uint64 count followed
// by that many raw 32-byte hashes; all integers are big-endian.
func (u *UndoBlock) Deserialize(r io.Reader) error {
	err := binary.Read(r, binary.BigEndian, &u.numAdds)
	if err != nil {
		return err
	}

	var posCount uint64
	err = binary.Read(r, binary.BigEndian, &posCount)
	if err != nil {
		return err
	}
	// NOTE(review): posCount and hashCount come straight off the wire,
	// so a corrupt or hostile stream can make these allocations
	// arbitrarily large — consider a sanity bound if the reader is
	// untrusted.
	u.positions = make([]uint64, posCount)
	err = binary.Read(r, binary.BigEndian, u.positions)
	if err != nil {
		return err
	}

	var hashCount uint64
	err = binary.Read(r, binary.BigEndian, &hashCount)
	if err != nil {
		return err
	}
	u.hashes = make([]Hash, hashCount)
	for i := uint64(0); i < hashCount; i++ {
		// io.ReadFull instead of a single r.Read: Read is allowed to
		// return fewer than 32 bytes with a nil error (a short read,
		// common on network readers), which the old n != 32 check
		// turned into a spurious failure even though more data was
		// available. ReadFull loops until the buffer is full or the
		// stream genuinely ends (io.ErrUnexpectedEOF).
		_, err := io.ReadFull(r, u.hashes[i][:])
		if err != nil {
			return err
		}
	}
	return nil
}
// Undo reverts a Modify() with the given undoBlock: it removes the
// leaves added in that block, restores the deleted hashes, replays the
// deletion-time swaps in reverse, fixes up positionMap, and rehashes
// the dirtied positions.
func (f *Forest) Undo(ub UndoBlock) error {
	prevAdds := uint64(ub.numAdds)
	prevDels := uint64(len(ub.hashes))
	// how many leaves were there at the last block?
	prevNumLeaves := f.numLeaves + prevDels - prevAdds
	// run the transform to figure out where things came from, then flip
	// the arrows so they can be replayed backwards
	leafMoves := floorTransform(ub.positions, prevNumLeaves, f.rows)
	reverseArrowSlice(leafMoves)
	// first undo the leaves added in the last block
	f.numLeaves -= prevAdds
	// remove everything between prevNumLeaves and numLeaves from positionMap
	for p := f.numLeaves; p < f.numLeaves+prevAdds; p++ {
		delete(f.positionMap, f.data.read(p).Mini())
	}
	// also add everything past numleaves and prevnumleaves to dirt
	// which might already be there, inefficient!
	// TODO fix this dirt thing
	// NOTE(review): dirt is made with length len(leafMoves)*2 and then
	// appended to below, so the appends land AFTER the pre-sized region
	// that the swap loop later overwrites by index — confirm this
	// ordering is intended and no zero entries survive.
	dirt := make([]uint64, len(leafMoves)*2)
	// place hashes starting at old post-remove numLeaves. they're off the
	// forest bounds to the right; they will be shuffled in to the left.
	for i, h := range ub.hashes {
		// an empty hash here means the undo block was built wrong (or
		// the forest edge was already empty when it was built)
		if h == empty {
			return fmt.Errorf("hash %d in undoblock is empty", i)
		}
		f.data.write(f.numLeaves+uint64(i), h)
		dirt = append(dirt, f.numLeaves+uint64(i))
	}
	// go through swaps in reverse order, undoing each one
	for i, a := range leafMoves {
		f.data.swapHash(a.from, a.to)
		dirt[2*i] = a.to // this is wrong, it way over hashes
		dirt[(2*i)+1] = a.from // also should be parents
	}
	// update positionMap. The stuff we do want has been moved in to the forest,
	// the stuff we don't want has been moved to the right past the edge
	for p := f.numLeaves; p < prevNumLeaves; p++ {
		f.positionMap[f.data.read(p).Mini()] = p
	}
	for _, p := range ub.positions {
		f.positionMap[f.data.read(p).Mini()] = p
	}
	for _, d := range dirt {
		// everything that moved needs to have its position updated in the map
		// TODO does it..?
		m := f.data.read(d).Mini()
		oldpos := f.positionMap[m]
		if oldpos != d {
			delete(f.positionMap, m)
			f.positionMap[m] = d
		}
	}
	// rehash above all tos/froms
	f.numLeaves = prevNumLeaves // change numLeaves before rehashing
	sortUint64s(dirt)
	err := f.reHash(dirt)
	if err != nil {
		return err
	}
	return nil
}
// BuildUndoData makes an UndoBlock from the same data that you'd give
// to Modify: the number of adds and the deletion positions (in sorted
// order). It reads the to-be-deleted hashes out of the forest, so it
// must run against the forest state that Modify will operate on.
func (f *Forest) BuildUndoData(numadds uint64, dels []uint64) *UndoBlock {
	ub := new(UndoBlock)
	ub.numAdds = uint32(numadds)
	ub.positions = dels // the deletion positions, in sorted order
	ub.hashes = make([]Hash, len(dels))
	// Populate the hashes from the left edge of the forest.
	// NOTE(review): hashes are read from f.numLeaves+i, not from
	// dels[i] — presumably Modify has relocated the deleted leaves to
	// the forest edge; confirm against the caller's ordering.
	// (was `for i, _ := range`; the blank identifier is redundant)
	for i := range ub.positions {
		ub.hashes[i] = f.data.read(f.numLeaves + uint64(i))
		if ub.hashes[i] == empty {
			fmt.Printf("warning, wrote empty hash for position %d\n",
				ub.positions[i])
		}
	}
	return ub
}