/
reward_state.go
108 lines (83 loc) · 4.56 KB
/
reward_state.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
package reward
import (
abi "github.com/chenjianmei111/go-state-types/abi"
big "github.com/chenjianmei111/go-state-types/big"
"github.com/chenjianmei111/go-state-types/network"
"github.com/chenjianmei111/specs-actors/actors/runtime"
"github.com/chenjianmei111/specs-actors/actors/util/smoothing"
)
// A quantity of space * time (in byte-epochs) representing power committed to the network for some duration.
// Alias of big.Int so Spacetime values interoperate directly with the big arithmetic helpers used below.
type Spacetime = big.Int
// 36.266260308195979333 FIL
// https://www.wolframalpha.com/input/?i=IntegerPart%5B330%2C000%2C000+*+%281+-+Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D%29+*+10%5E18%5D
// InitialRewardPositionEstimateStr is the simple-minting reward at epoch 1, in attoFIL, as a decimal string.
const InitialRewardPositionEstimateStr = "36266260308195979333"

// InitialRewardPositionEstimate seeds the position component of the reward smoothing filter at genesis.
var InitialRewardPositionEstimate = big.MustFromString(InitialRewardPositionEstimateStr)
// -1.0982489*10^-7 FIL per epoch. Change of simple minted tokens between epochs 0 and 1
// https://www.wolframalpha.com/input/?i=IntegerPart%5B%28Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D+-+1%29+*+10%5E18%5D
// InitialRewardVelocityEstimate seeds the velocity component of the reward smoothing filter at genesis.
var InitialRewardVelocityEstimate = abi.NewTokenAmount(-109897758509)
// State is the on-chain state of the reward actor: cumulative baseline/realized
// power, the current per-WinCount block reward, and its smoothed estimate.
type State struct {
	// CumsumBaseline is a target CumsumRealized needs to reach for EffectiveNetworkTime to increase
	// CumsumBaseline and CumsumRealized are expressed in byte-epochs.
	CumsumBaseline Spacetime

	// CumsumRealized is cumulative sum of network power capped by BaselinePower(epoch)
	CumsumRealized Spacetime

	// EffectiveNetworkTime is ceiling of real effective network time `theta` based on
	// CumsumBaselinePower(theta) == CumsumRealizedPower
	// Theta captures the notion of how much the network has progressed in its baseline
	// and in advancing network time.
	EffectiveNetworkTime abi.ChainEpoch

	// EffectiveBaselinePower is the baseline power at the EffectiveNetworkTime epoch
	EffectiveBaselinePower abi.StoragePower

	// The reward to be paid in per WinCount to block producers.
	// The actual reward total paid out depends on the number of winners in any round.
	// This value is recomputed every non-null epoch and used in the next non-null epoch.
	ThisEpochReward abi.TokenAmount

	// Smoothed ThisEpochReward
	ThisEpochRewardSmoothed *smoothing.FilterEstimate

	// The baseline power the network is targeting at st.Epoch
	ThisEpochBaselinePower abi.StoragePower

	// Epoch tracks for which epoch the Reward was computed
	Epoch abi.ChainEpoch

	// TotalMined tracks the total FIL awarded to block miners
	TotalMined abi.TokenAmount
}
// ConstructState builds the genesis reward actor state. All cumulative sums and
// totals start at zero, the baseline is seeded from the v0 initial value, and the
// smoothing filter is initialized from the analytically derived genesis estimates.
// It then advances once to epoch 0 so ThisEpochReward holds a real value.
func ConstructState(rt runtime.Runtime, currRealizedPower abi.StoragePower) *State {
	st := &State{
		// Cumulative byte-epoch sums and mined total all begin empty.
		CumsumBaseline: big.Zero(),
		CumsumRealized: big.Zero(),
		TotalMined:     big.Zero(),

		// Baseline tracking starts from the v0 initial baseline value.
		EffectiveNetworkTime:   0,
		EffectiveBaselinePower: BaselineInitialValueV0,
		ThisEpochBaselinePower: InitBaselinePower(),

		// Epoch is -1 so the update below lands the state on epoch 0.
		Epoch:           -1,
		ThisEpochReward: big.Zero(),

		// Seed the smoothed reward with the precomputed genesis position/velocity.
		ThisEpochRewardSmoothed: smoothing.NewEstimate(InitialRewardPositionEstimate, InitialRewardVelocityEstimate),
	}

	// Advance to epoch 0 and compute the first epoch's reward.
	st.updateToNextEpochWithReward(rt, currRealizedPower, network.Version0)
	return st
}
// updateToNextEpoch advances internal state by one epoch given the current
// realized network power. It is also used to catch up state across null rounds.
// It steps the per-epoch baseline forward, accumulates realized power (capped at
// the baseline), and then grows effective network time while realized spacetime
// exceeds the baseline spacetime target.
func (st *State) updateToNextEpoch(currRealizedPower abi.StoragePower, nv network.Version) {
	st.Epoch++
	st.ThisEpochBaselinePower = BaselinePowerFromPrev(st.ThisEpochBaselinePower, nv)

	// Realized power credited this epoch never exceeds the baseline.
	capped := big.Min(st.ThisEpochBaselinePower, currRealizedPower)
	st.CumsumRealized = big.Add(st.CumsumRealized, capped)

	// Ratchet effective network time forward until the baseline cumsum catches up.
	for st.CumsumRealized.GreaterThan(st.CumsumBaseline) {
		st.EffectiveNetworkTime++
		st.EffectiveBaselinePower = BaselinePowerFromPrev(st.EffectiveBaselinePower, nv)
		st.CumsumBaseline = big.Add(st.CumsumBaseline, st.EffectiveBaselinePower)
	}
}
// updateToNextEpochWithReward advances state by one epoch (as updateToNextEpoch
// does) and additionally recomputes ThisEpochReward from the change in the
// reward-theta value across the transition.
func (st *State) updateToNextEpochWithReward(rt runtime.Runtime, currRealizedPower abi.StoragePower, nv network.Version) {
	// Capture theta before and after the epoch transition; the reward is a
	// function of the pair.
	thetaBefore := computeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, st.CumsumRealized, st.CumsumBaseline)
	st.updateToNextEpoch(currRealizedPower, nv)
	thetaAfter := computeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, st.CumsumRealized, st.CumsumBaseline)

	st.ThisEpochReward = computeReward(rt, st.Epoch, thetaBefore, thetaAfter)
}
// updateSmoothedEstimates folds the current ThisEpochReward into the smoothed
// reward estimate, weighting by delta (the number of epochs since the last update).
func (st *State) updateSmoothedEstimates(delta abi.ChainEpoch) {
	filter := smoothing.LoadFilter(st.ThisEpochRewardSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta)
	st.ThisEpochRewardSmoothed = filter.NextEstimate(st.ThisEpochReward, delta)
}