# Copyright (c) 2018 Status Research & Development GmbH
# Distributed under the Apache v2 License (license terms are at http://www.apache.org/licenses/LICENSE-2.0).
33
4- import math, endians,
5- keccak_tiny
4+ import ./ proof_of_work, mining
65
7- import ./ private/ [primes, casting, functional, intmath]
8- export toHex, hexToByteArrayBE, hexToSeqBytesBE, toByteArrayBE # debug functions
9- export keccak_tiny
106
# ###############################################################################
# Definitions

const
  REVISION* = 23                  # Based on spec revision 23
  WORD_BYTES = 4                  # bytes in word - in Nim we use 64 bits words # TODO check that
  DATASET_BYTES_INIT* = 2'u^30    # bytes in dataset at genesis
  DATASET_BYTES_GROWTH* = 2'u^23  # dataset growth per epoch
  CACHE_BYTES_INIT* = 2'u^24      # bytes in cache at genesis
  CACHE_BYTES_GROWTH* = 2'u^17    # cache growth per epoch
  CACHE_MULTIPLIER = 1024         # Size of the DAG relative to the cache
  EPOCH_LENGTH* = 30000           # blocks per epoch
  MIX_BYTES* = 128                # width of mix
  HASH_BYTES* = 64                # hash length in bytes
  DATASET_PARENTS* = 256          # number of parents of each dataset element
  CACHE_ROUNDS* = 3               # number of rounds in cache production
  ACCESSES* = 64                  # number of accesses in hashimoto loop
28-
# ###############################################################################
# Parameters

proc get_cache_size*(block_number: uint): uint {.noSideEffect.}=
  ## Computes the cache size (in bytes) for the epoch containing
  ## `block_number`: start just below a power of two and shrink until
  ## the size is HASH_BYTES times a prime.
  result = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number div EPOCH_LENGTH)
  result -= HASH_BYTES
  while (let dm = divmod(result, HASH_BYTES);
        dm.rem == 0 and not dm.quot.isPrime):
    # In a static lang, checking that the result of a division is prime
    # means checking that remainder == 0 and quotient is prime
    result -= 2 * HASH_BYTES
40-
proc get_data_size*(block_number: uint): uint {.noSideEffect.}=
  ## Computes the full dataset size (in bytes) for the epoch containing
  ## `block_number`: start just below a power of two and shrink until
  ## the size is MIX_BYTES times a prime.
  result = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number div EPOCH_LENGTH)
  result -= MIX_BYTES
  while (let dm = divmod(result, MIX_BYTES);
        dm.rem == 0 and not dm.quot.isPrime):
    # remainder == 0 and prime quotient <=> size div MIX_BYTES is prime
    result -= 2 * MIX_BYTES
47-
# ###############################################################################
# Fetch from lookup tables of 2048 epochs of data sizes and cache sizes
import ./data_sizes

proc get_datasize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} =
  ## Lookup-table variant of `get_data_size`: returns the precomputed
  ## dataset size for the epoch containing `block_number`.
  data_sizes[block_number div EPOCH_LENGTH]
54-
proc get_cachesize_lut*(block_number: Natural): uint64 {.noSideEffect, inline.} =
  ## Lookup-table variant of `get_cache_size`: returns the precomputed
  ## cache size for the epoch containing `block_number`.
  cache_sizes[block_number div EPOCH_LENGTH]
57-
# ###############################################################################
# Cache generation

proc mkcache*(cache_size: uint64, seed: Hash[256]): seq[Hash[512]] {.noSideEffect.}=
  ## Generates the per-epoch cache: a sequential keccak512 chain seeded
  ## by `seed`, then CACHE_ROUNDS passes of a low-round RandMemoHash.

  # Number of 64-byte hashes in the cache
  let n = int(cache_size div HASH_BYTES)

  # Sequentially produce the initial dataset
  result = newSeq[Hash[512]](n)
  result[0] = keccak512 seed.data

  for i in 1 ..< n:
    result[i] = keccak512 result[i-1].data

  # Use a low-round version of randmemohash
  for _ in 0 ..< CACHE_ROUNDS:
    for i in 0 ..< n:
      let
        v = result[i].as_u32_words[0] mod n.uint32  # pseudo-random partner index
        a = result[(i-1+n) mod n].data              # previous element (wraps around)
        b = result[v.int].data
      result[i] = keccak512 zipMap(a, b, x xor y)
81-
# ###############################################################################
# Data aggregation function

const FNV_PRIME = 0x01000193

proc fnv*[T: SomeUnsignedInt or Natural](v1, v2: T): uint32 {.inline, noSideEffect.}=
  ## FNV-1-style mixing function used by ethash.

  # Original formula is ((v1 * FNV_PRIME) xor v2) mod 2^32
  # However contrary to Python and depending on the type T,
  # in Nim (v1 * FNV_PRIME) can overflow
  # We can't do 2^32 with an int (only 2^32-1)
  # and in general (a xor b) mod c != (a mod c) xor (b mod c)
  #
  # Thankfully
  # We know that:
  #   - (a xor b) and c == (a and c) xor (b and c)
  #   - for powers of 2: a mod 2^p == a and (2^p - 1)
  #   - 2^32 - 1 == high(uint32)

  # So casting to uint32 does the modulo and masking just fine

  (v1.uint32 * FNV_PRIME) xor v2.uint32
104-
# ###############################################################################
# Full dataset calculation

proc calc_dataset_item*(cache: seq[Hash[512]], i: Natural): Hash[512] {.noSideEffect, noInit.} =
  ## Computes the i-th 64-byte element of the full dataset from the cache
  ## (this is what "light" verification recomputes on demand).
  let n = cache.len
  const r: uint32 = HASH_BYTES div WORD_BYTES  # uint32 words per hash

  # Alias for the result value. Interpreted as an array of uint32 words
  var mix = cast[ptr array[16, uint32]](addr result)

  mix[] = cache[i mod n].as_u32_words
  when system.cpuEndian == littleEndian:
    mix[0] = mix[0] xor i.uint32
  else:
    mix[high(mix)] = mix[high(mix)] xor i.uint32
  result = keccak512 mix[]

  # FNV with a lots of random cache nodes based on i
  for j in 0'u32 ..< DATASET_PARENTS:
    let cache_index = fnv(i.uint32 xor j, mix[j mod r])
    mix[] = zipMap(mix[], cache[cache_index.int mod n].as_u32_words, fnv(x, y))

  result = keccak512 mix[]
128-
proc calc_dataset*(full_size: Natural, cache: seq[Hash[512]]): seq[Hash[512]] {.noSideEffect.} =
  ## Generates the full dataset (`full_size` bytes, i.e. full_size div
  ## HASH_BYTES elements) by computing every item from the cache.
  result = newSeq[Hash[512]](full_size div HASH_BYTES)

  for i, hash in result.mpairs:
    hash = calc_dataset_item(cache, i)
134- hash = calc_dataset_item (cache, i)
135-
# ###############################################################################
# Main loop

type HashimotoHash = tuple[mix_digest: Hash[256], value: Hash[256]]
type DatasetLookup = proc(i: Natural): Hash[512] {.noSideEffect.}
141-
proc hashimoto(header: Hash[256],
               nonce: uint64,
               full_size: Natural,
               dataset_lookup: DatasetLookup
               ): HashimotoHash {.noInit, noSideEffect.}=
  ## Core ethash loop: seeds a 64-byte state from header+nonce, folds in
  ## ACCESSES pseudo-random dataset reads with fnv, then compresses the
  ## mix into the (mix_digest, value) result pair.
  let
    n = uint32 full_size div HASH_BYTES
    w = uint32 MIX_BYTES div WORD_BYTES
    mixhashes = uint32 MIX_BYTES div HASH_BYTES

  assert full_size mod HASH_BYTES == 0
  assert MIX_BYTES mod HASH_BYTES == 0

  # combine header+nonce into a 64 byte seed
  var s{.noInit.}: Hash[512]
  let s_bytes = cast[ptr array[64, byte]](addr s)   # Alias to interpret s as a byte array
  let s_words = cast[ptr array[16, uint32]](addr s) # Alias to interpret s as an uint32 array

  s_bytes[0 ..< 32] = header.data # We first populate the first 40 bytes of s with the concatenation

  var nonceLE{.noInit.}: array[8, byte] # the nonce should be concatenated with its LITTLE ENDIAN representation
  littleEndian64(addr nonceLE, unsafeAddr nonce)
  s_bytes[32 ..< 40] = cast[array[8, byte]](nonceLE)

  s = keccak_512 s_bytes[0 ..< 40] # TODO: Does this allocate a seq?

  # start the mix with replicated s
  assert MIX_BYTES div HASH_BYTES == 2
  var mix{.noInit.}: array[32, uint32]
  mix[0 ..< 16] = s_words[]
  mix[16 ..< 32] = s_words[]

  # mix in random dataset nodes
  for i in 0'u32 ..< ACCESSES:
    # p is aligned to a MIX_BYTES boundary: (fnv(...) mod (n div mixhashes)) * mixhashes
    let p = fnv(i xor s_words[0], mix[i mod w]) mod (n div mixhashes) * mixhashes

    # Unrolled: for j in range(MIX_BYTES / HASH_BYTES): => for j in 0 ..< 2
    var newdata{.noInit.}: type mix
    newdata[0 ..< 16] = cast[array[16, uint32]](dataset_lookup(p))
    newdata[16 ..< 32] = cast[array[16, uint32]](dataset_lookup(p + 1))

    mix = zipMap(mix, newdata, fnv(x, y))

  # compress mix: fold each group of 4 words into one
  var cmix{.noInit.}: array[8, uint32]
  for i in countup(0, mix.len - 1, 4):
    cmix[i div 4] = mix[i].fnv(mix[i+1]).fnv(mix[i+2]).fnv(mix[i+3])

  # ⚠⚠ Warning ⚠⚠: Another bigEndian littleEndian issue?
  # It doesn't seem like the uint32 in cmix need to be changed to big endian
  result.mix_digest = cast[Hash[256]](cmix)

  var concat{.noInit.}: array[64 + 32, byte]
  concat[0 ..< 64] = s_bytes[]
  concat[64 ..< 96] = cast[array[32, byte]](cmix)
  result.value = keccak_256(concat)
198-
proc hashimoto_light*(full_size: Natural, cache: seq[Hash[512]],
                      header: Hash[256], nonce: uint64): HashimotoHash {.noSideEffect, inline.} =
  ## Light-client variant: dataset items are recomputed on the fly from
  ## the cache instead of being read from a precomputed full dataset.
  let light: DatasetLookup = proc(x: Natural): Hash[512] = calc_dataset_item(cache, x)
  hashimoto(header,
            nonce,
            full_size,
            light)
207-
proc hashimoto_full*(full_size: Natural, dataset: seq[Hash[512]],
                     header: Hash[256], nonce: uint64): HashimotoHash {.noSideEffect, inline.} =
  ## Full (miner) variant: dataset items are read directly from the
  ## precomputed dataset.
  # TODO spec mentions full_size but I don't think we need it (retrieve it from dataset.len)
  let full: DatasetLookup = proc(x: Natural): Hash[512] = dataset[x]
  hashimoto(header,
            nonce,
            full_size,
            full)
216-
# ###############################################################################
# Defining the seed hash

proc get_seedhash*(block_number: uint64): Hash[256] {.noSideEffect.} =
  ## Seed hash for the epoch containing `block_number`: keccak256 applied
  ## epoch-number times, starting from 32 zero bytes (Nim zero-inits result).
  for _ in 0 ..< int(block_number div EPOCH_LENGTH):
    result = keccak256 result.data
7+ export proof_of_work, mining
0 commit comments