/
difficulty.go
223 lines (204 loc) · 10.4 KB
/
difficulty.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
package consensus
import (
"bytes"
"encoding/binary"
"math/big"
"github.com/HyperspaceApp/Hyperspace/build"
"github.com/HyperspaceApp/Hyperspace/types"
"github.com/HyperspaceApp/errors"
"github.com/coreos/bbolt"
)
// difficulty.go defines the Oak difficulty adjustment algorithm.
//
// A running tally is maintained which keeps the total difficulty and total time
// passed across all blocks. The total difficulty can be divided by the total
// time to get a hashrate. The total is multiplied by 0.995 each block, to keep
// exponential preference on recent blocks with a half life of 144 data points.
// This is about 24 hours. This estimated hashrate is assumed to closely match
// the actual hashrate on the network.
//
// There is a target block time. If the difficulty increases or decreases, the
// total amount of time that has passed will be more or less than the target
// amount of time passed for the current height. To counteract this, the target
// block time for each block is adjusted based on how far away from the desired
// total time passed the current total time passed is. If the total time passed
// is too low, blocks are targeted to be slightly longer, which helps to correct
// the network. And if the total time passed is too high, blocks are targeted to
// be slightly shorter, to help correct the network.
//
// High variance in block times means that the corrective action should not be
// very strong if the total time passed has only missed the target time passed
// by a few hours. But if the total time passed is significantly off, the block
// time corrections should be much stronger. The square of the total deviation
// is used to figure out what the adjustment should be. At 10,000 seconds
// variance (about 3 hours), blocks will be adjusted by 10 seconds each. At
// 20,000 seconds, blocks will be adjusted by 40 seconds each, a 4x adjustment
// for 2x the error. And at 40,000 seconds, blocks will be adjusted by 160
// seconds each, and so on.
//
// The total amount of blocktime adjustment is capped to 1/3 and 3x the target
// blocktime, to prevent too much disruption on the network. If blocks are
// actually coming out 3x as fast as intended, there will be a (temporary)
// significant increase on the amount of strain on nodes to process blocks. And
// at 1/3 the target blocktime, the total blockchain throughput will decrease
// dramatically.
//
// Finally, one extra cap is applied to the difficulty adjustment - the
// difficulty of finding a block is not allowed to change more than 0.4% every
// block. This maps to a total possible difficulty change of 55x across 1008
// blocks. This clamp helps to prevent wild swings when the hashrate increases
or decreases rapidly on the network, and it also limits the amount of damage
// that a malicious attacker can do if performing a difficulty raising attack.
// childTargetOak sets the child target based on the total time delta and total
// hashrate of the parent block. The deltas are known for the child block,
// however we do not use the child block deltas because that would allow the
// child block to influence the target of the following block, which makes
// abuse easier in selfish mining scenarios.
//
// Targets are inversely related to difficulty: a numerically smaller target is
// harder to meet, so the "max" target below corresponds to the maximum allowed
// difficulty increase and vice versa.
func (cs *ConsensusSet) childTargetOak(parentTotalTime int64, parentTotalTarget, currentTarget types.Target, parentHeight types.BlockHeight, parentTimestamp types.Timestamp) types.Target {
	// Hard-coded target for the public launch of the network ("standard"
	// release only). Sia's target at height 164433 was
	// [0 0 0 0 0 0 0 0 105 202 12 225 144 224 144 183 167 243 233 188 79 177 239 138 67 214 220 16 80 74 216 60]
	// and this fixed value targets roughly 1% of that difficulty.
	if parentHeight == types.BlockHeight(7) && build.Release == "standard" {
		return types.Target{0, 0, 0, 0, 0, 0, 0, 40}
	}
	// Determine the delta of the current total time vs. the desired total
	// time. The desired total time is the elapsed time implied by the target
	// block frequency: parentHeight block intervals past the genesis
	// timestamp.
	var delta int64
	// The expected time is an absolute time computed from the genesis block,
	// and the delta is measured against the absolute timestamp of the parent
	// block.
	//
	// Rules elsewhere in consensus ensure that the timestamp of the parent
	// block has not been manipulated by more than a few hours, which is
	// accurate enough for this logic to be safe.
	expectedTime := int64(types.BlockFrequency*parentHeight) + int64(types.GenesisTimestamp)
	delta = expectedTime - int64(parentTimestamp)
	// Convert the delta into a target block time. Squaring makes the
	// correction quadratic in the deviation; squaring also discards the sign,
	// so it is restored afterwards.
	square := delta * delta
	if delta < 0 {
		// If the delta is negative, restore the negative value.
		square *= -1
	}
	// 10e6 is an untyped constant (exactly 10,000,000) so this is integer
	// division: a 10e3 second delta leads to a 10 second shift.
	shift := square / 10e6
	targetBlockTime := int64(types.BlockFrequency) + shift
	// Clamp the block time to 1/3 and 3x the target block time.
	if targetBlockTime < int64(types.BlockFrequency)/types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) / types.OakMaxBlockShift
	}
	if targetBlockTime > int64(types.BlockFrequency)*types.OakMaxBlockShift {
		targetBlockTime = int64(types.BlockFrequency) * types.OakMaxBlockShift
	}
	// Determine the hashrate using the total time and total target. Set a
	// minimum total time of 1 to prevent divide by zero and underflows.
	if parentTotalTime < 1 {
		parentTotalTime = 1
	}
	visibleHashrate := parentTotalTarget.Difficulty().Div64(uint64(parentTotalTime)) // Hashes per second.
	// Handle divide by zero risks.
	if visibleHashrate.IsZero() {
		visibleHashrate = visibleHashrate.Add(types.NewCurrency64(1))
	}
	if targetBlockTime == 0 {
		// This code can only possibly be triggered if the block frequency is
		// less than 3, but during testing the block frequency is 1.
		targetBlockTime = 1
	}
	// Determine the new target by multiplying the visible hashrate by the
	// target block time. Clamp it to a 0.4% difficulty adjustment.
	maxNewTarget := currentTarget.MulDifficulty(types.OakMaxRise) // Max = difficulty increase (target decrease)
	minNewTarget := currentTarget.MulDifficulty(types.OakMaxDrop) // Min = difficulty decrease (target increase)
	newTarget := types.RatToTarget(new(big.Rat).SetFrac(types.RootDepth.Int(), visibleHashrate.Mul64(uint64(targetBlockTime)).Big()))
	if newTarget.Cmp(maxNewTarget) < 0 {
		// The computed target fell (difficulty rose) by more than OakMaxRise
		// allows; pin it to the maximum permitted increase.
		newTarget = maxNewTarget
	}
	if newTarget.Cmp(minNewTarget) > 0 {
		// Difficulty would drop by more than OakMaxDrop allows; pin it to the
		// maximum permitted decrease. NOTE(review): the previous comment here
		// ("can only trigger if BlockFrequency is less than 3 seconds")
		// appears copied from the targetBlockTime check above — this clamp
		// can fire on any sharp hashrate drop; confirm.
		newTarget = minNewTarget
	}
	return newTarget
}
// getBlockTotals returns the Oak totals (total time and total target) that
// were previously persisted for the given block by storeBlockTotals.
func (cs *ConsensusSet) getBlockTotals(tx *bolt.Tx, id types.BlockID) (totalTime int64, totalTarget types.Target) {
	// The record layout matches what storeBlockTotals wrote: 8 bytes of
	// little-endian total time followed by the raw total target bytes.
	record := tx.Bucket(BucketOak).Get(id[:])
	totalTime = int64(binary.LittleEndian.Uint64(record[:8]))
	copy(totalTarget[:], record[8:])
	return totalTime, totalTarget
}
// storeBlockTotals computes the new total time and total target for the
// current block, persists them in the oak bucket keyed by the block's ID, and
// returns the new totals. Each total is first decayed by the Oak decay ratio
// and then increased by the current block's contribution.
func (cs *ConsensusSet) storeBlockTotals(tx *bolt.Tx, currentHeight types.BlockHeight, currentBlockID types.BlockID, prevTotalTime int64, parentTimestamp, currentTimestamp types.Timestamp, prevTotalTarget, targetOfCurrentBlock types.Target) (newTotalTime int64, newTotalTarget types.Target, err error) {
	// Decay the running totals, then fold in this block's time delta and
	// target.
	decayedTime := prevTotalTime * types.OakDecayNum / types.OakDecayDenom
	newTotalTime = decayedTime + (int64(currentTimestamp) - int64(parentTimestamp))
	decay := big.NewRat(types.OakDecayNum, types.OakDecayDenom)
	newTotalTarget = prevTotalTarget.MulDifficulty(decay).AddDifficulties(targetOfCurrentBlock)
	// Serialize as 8 bytes of little-endian total time followed by the total
	// target, and store the record under the current block's ID. ("record"
	// deliberately avoids shadowing the imported bytes package.)
	record := make([]byte, 40)
	binary.LittleEndian.PutUint64(record[:8], uint64(newTotalTime))
	copy(record[8:], newTotalTarget[:])
	if err = tx.Bucket(BucketOak).Put(currentBlockID[:], record); err != nil {
		return 0, types.Target{}, errors.Extend(errors.New("unable to store total time values"), err)
	}
	return newTotalTime, newTotalTarget, nil
}
// initOak will initialize all of the oak difficulty adjustment related fields.
// This is separate from the initialization process for compatibility reasons -
// some databases will not have these fields at start, so it must be checked.
//
// After oak initialization is complete, a specific field in the oak bucket is
// marked so that oak initialization can be skipped in the future.
func (cs *ConsensusSet) initOak(tx *bolt.Tx) error {
	// Make sure the oak bucket exists.
	oakBucket, err := tx.CreateBucketIfNotExists(BucketOak)
	if err != nil {
		return errors.Extend(errors.New("unable to create oak bucket"), err)
	}
	// If a previous run already initialized the oak fields, there is nothing
	// left to do.
	if bytes.Equal(oakBucket.Get(FieldOakInit), ValueOakInit) {
		return nil
	}
	currentHeight := blockHeight(tx)
	// Seed the running totals with the genesis block's values.
	totalTime, totalTarget, err := cs.storeBlockTotals(tx, 0, types.GenesisID, 0, types.GenesisTimestamp, types.GenesisTimestamp, types.RootDepth, types.RootTarget)
	if err != nil {
		return errors.Extend(errors.New("unable to store genesis block totals"), err)
	}
	// Walk the chain from height 1 (genesis was handled above) and record the
	// running totals for every block along the current path.
	prevTimestamp := types.GenesisTimestamp
	prevChildTarget := types.RootTarget
	for height := types.BlockHeight(1); height <= currentHeight; height++ {
		// Look up the processed block at this height.
		id, err := getPath(tx, height)
		if err != nil {
			return errors.Extend(errors.New("unable to find block at height"), err)
		}
		pb, err := getBlockMap(tx, id)
		if err != nil {
			return errors.Extend(errors.New("unable to find block from id"), err)
		}
		// Calculate and store the new block totals.
		totalTime, totalTarget, err = cs.storeBlockTotals(tx, height, id, totalTime, prevTimestamp, pb.Block.Timestamp, totalTarget, prevChildTarget)
		if err != nil {
			return errors.Extend(errors.New("unable to store updated block totals"), err)
		}
		// Carry this block's values forward as the next iteration's parent.
		prevTimestamp = pb.Block.Timestamp
		prevChildTarget = pb.ChildTarget
	}
	// Mark initialization as complete so future startups skip the scan.
	if err := oakBucket.Put(FieldOakInit, ValueOakInit); err != nil {
		return errors.Extend(errors.New("unable to put oak init confirmation into oak bucket"), err)
	}
	return nil
}