-
Notifications
You must be signed in to change notification settings - Fork 4
/
main.cpp
713 lines (630 loc) · 21.6 KB
/
main.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
#include "PlatformWrap.h"
#include "HashFunctions/city.h"
#include "HashFunctions/farmhash.h"
#include "HashFunctions/mum.h"
#include "HashFunctions/MurmurHash2.h"
#include "HashFunctions/MurmurHash3.h"
#include "HashFunctions/SimpleHashFunctions.h"
#include "HashFunctions/sha1.h"
#include "HashFunctions/SpookyV2.h"
#define XXH_INLINE_ALL
#include "HashFunctions/xxhash.h"
#include "HashFunctions/t1ha.h"
#if __x86_64__ || _M_AMD64
#define MEOW_AVAILABLE 1
#endif
#if MEOW_AVAILABLE
#include "HashFunctions/meow_hash_x64_aesni.h"
#endif
#include <vector>
#include <string>
#include <unordered_set>
#include <map>
#include <stdio.h>
#include <math.h>
#include <inttypes.h>
#if PLATFORM_ANDROID
// Android native-app-glue handle; set in android_main, used by ReadDataSet
// to reach the APK asset manager and by handleAppCommand to finish the activity.
android_app* g_AndroidApp;
#endif
// All test output goes here: stdout by default, a file on sdcard on Android.
FILE* g_OutputFile = stdout;
// Declared extern; implemented elsewhere in the project (separate source files).
extern void crc32 (const void * key, int len, uint32_t seed, void * out);
extern void md5_32 (const void * key, int len, uint32_t /*seed*/, void * out);
// Reference SipHash implementation with C linkage; keyed with a 128-bit key.
extern "C" int siphash(uint8_t *out, const uint8_t *in, uint64_t inlen, const uint8_t *k);
// ------------------------------------------------------------------------------------
// Data sets & reading them from file
// One test corpus: the raw file bytes plus (offset,length) views of each entry.
struct DataSet
{
    typedef std::pair<size_t,size_t> OffsetAndSize;

    std::string name;                   // data set (file) name
    std::vector<char> buffer;           // raw file contents
    std::vector<OffsetAndSize> entries; // entries within buffer: offset + length each
    size_t totalSize = 0;               // bytes that get hashed (file size minus all entry delimiters)

    DataSet() { }
};
// Reads one newline-delimited data set, either from the file system or (on
// Android) from the APK asset manager.
// folderName is prepended to filenameStr on file-system platforms; on iOS/XB1
// the "TestData/" prefix is stripped since bundled resources keep only the
// plain file names.
// Each line becomes one entry (offset + length into the raw buffer); a
// trailing '\r' of a Windows style line ending is excluded from the entry.
// Note: any bytes after the final '\n' do not form an entry (original behavior).
// Returns a heap-allocated DataSet owned by the caller, or NULL on failure.
static DataSet* ReadDataSet(const char* folderName, const char* filenameStr)
{
    std::string filename = std::string(filenameStr);
    #if PLATFORM_IOS || PLATFORM_XBOXONE
    filename.erase(0, 9); // remove TestData/ on iOS/XB1; files in resources only retain the filenames
    #endif
    #if !PLATFORM_ANDROID
    std::string fullPath = std::string(folderName) + filename;
    FILE* f = fopen(fullPath.c_str(), "rb");
    if (!f)
    {
        fprintf(g_OutputFile, "error: can't open dataset file '%s'\n", filename.c_str());
        return NULL;
    }
    #else
    AAsset* asset = AAssetManager_open(g_AndroidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING);
    if (!asset)
    {
        fprintf(g_OutputFile, "error: can't open dataset file '%s'\n", filename.c_str());
        return NULL;
    }
    #endif
    DataSet* data = new DataSet();
    data->name = filename;
    #if !PLATFORM_ANDROID
    fseek(f, 0, SEEK_END);
    size_t size = ftell(f);
    fseek(f, 0, SEEK_SET);
    data->buffer.resize(size);
    char* buffer = data->buffer.data();
    // BUG FIX: the fread result used to be ignored; a short read would leave
    // part of the buffer uninitialized and silently corrupt the test data.
    if (fread(buffer, 1, size, f) != size)
    {
        fprintf(g_OutputFile, "error: can't read dataset file '%s'\n", filename.c_str());
        fclose(f);
        delete data;
        return NULL;
    }
    #else
    size_t size = AAsset_getLength(asset);
    assert(size > 0);
    data->buffer.resize(size);
    char* buffer = data->buffer.data();
    AAsset_read(asset, buffer, size);
    AAsset_close(asset);
    #endif
    // split the buffer into entries, one per line
    size_t pos = 0;
    size_t wordStart = 0;
    data->totalSize = 0;
    while (pos < size)
    {
        if (buffer[pos] == '\n')
        {
            size_t wordEnd = pos;
            // remove any trailing Windows style newlines
            // (note: a line consisting of a single '\r' keeps it — original behavior)
            while (wordEnd > wordStart+1 && buffer[wordEnd-1] == '\r')
                --wordEnd;
            data->entries.push_back(std::make_pair(wordStart, wordEnd-wordStart));
            data->totalSize += wordEnd-wordStart;
            wordStart = pos+1;
        }
        ++pos;
    }
    #if !PLATFORM_ANDROID
    fclose(f);
    #endif
    return data;
}
// ------------------------------------------------------------------------------------
// Hash function testing code
// Rounds v up to the nearest power of two; returns v unchanged when it already
// is one. NextPowerOfTwo(0) wraps around to 0.
inline uint32_t NextPowerOfTwo(uint32_t v)
{
    --v;
    // smear the highest set bit into every lower position, then add one
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
// Collected results for one hash function: quality per data set, and the best
// observed throughput per tested data length.
struct Result
{
    struct DataSetResult
    {
        uint64_t hashsum = 0;                    // xor of all hashes (sanity/consistency check)
        int collisions = 0;                      // full hash-value collisions found
        float hashtabCollisionsIncrease = 0.0f;  // % of how much hashtable collisions we'd get, compared to an ideal hash
    };
    struct PerfResult
    {
        int length = 0;       // tested data length, bytes
        int mbps = 0;         // best observed MB/s
        int mbpsAligned = 0;  // best observed MB/s on aligned reads
    };

    std::string name;
    std::vector<PerfResult> mbpsPerLength;
    std::vector<DataSetResult> datasets;
    uint64_t hashsum = 0;

    Result() { mbpsPerLength.reserve(32); }
};
// Expected number of hashtable collisions when placing entryCount items into
// bucketCount buckets with an ideal (uniformly distributed) hash.
// http://stackoverflow.com/questions/9104504/expected-number-of-hash-collisions
static double CalculateExpectedCollisions(size_t bucketCount, size_t entryCount)
{
    const double m = (double)bucketCount;
    const double n = (double)entryCount;
    return n - m * (1.0 - pow((m - 1.0) / m, n));
}
// Lets myuint128_t be used as a key in std::unordered_set/map by simply
// folding the two 64-bit halves together.
namespace std
{
    template<> struct hash<myuint128_t>
    {
        std::size_t operator()(const myuint128_t& v) const noexcept
        {
            const std::size_t folded = v.a ^ v.b;
            return folded;
        }
    };
}
template<typename Hasher>
void TestQualityOnDataSet(const DataSet& dataset, Result::DataSetResult& outResult)
{
Hasher hasher;
// test for "hash quality":
// unique hashes found in all the entries (#entries - uniq == how many collisions found)
std::unordered_set<typename Hasher::HashType> uniq;
// unique buckets that we'd end up with, if we had a hashtable with a load factor of 0.8 that is
// always power of two size.
std::unordered_set<typename Hasher::HashType> uniqModulo;
const size_t entryCount = dataset.entries.size();
size_t hashtableSize = NextPowerOfTwo(entryCount / 0.8);
double expectedCollisons = CalculateExpectedCollisions(hashtableSize, entryCount);
outResult.hashsum = 0;
for (size_t i = 0; i != entryCount; ++i)
{
typename Hasher::HashType h = hasher(dataset.buffer.data() + dataset.entries[i].first, dataset.entries[i].second);
outResult.hashsum = h ^ outResult.hashsum;
uniq.insert(h);
uniqModulo.insert(h % hashtableSize);
}
outResult.collisions = (int)(entryCount - uniq.size());
double hashtabCollisions = entryCount - uniqModulo.size();
double collisionsIncrease = (hashtabCollisions / expectedCollisons - 1.0) * 100;
if (collisionsIncrease < 0)
collisionsIncrease = 0;
outResult.hashtabCollisionsIncrease = collisionsIncrease;
}
// Synthetic performance-test data: a 1 MB byte buffer, hashed at many lengths.
const size_t kSyntheticDataTotalSize = 1024 * 1024 * 1;
// How many times the whole perf measurement is repeated; per length the best
// (fastest) result across iterations is kept, to reduce timing noise.
const int kSyntheticDataIterations = 9;
// Synthetic hash performance test on various string lengths.
// Hashes the buffer in chunks of a geometrically growing length (2, 4, 7, 11,
// ... up to <5000 bytes; roughly x1.5 per step) and records the best MB/s
// observed per length into outResult.mbpsPerLength. Called several times per
// hash (see kSyntheticDataIterations) and once per 'aligned' mode; the fastest
// observed throughput wins for each length.
template<typename Hasher>
void TestPerformancePerLength(const std::vector<uint8_t>& data, bool aligned, Result& outResult)
{
    Hasher hasher;
    int step = 2;
    int index = 0;
    for (int len = 2; len < 5000; len += step, step += step/2, ++index)
    {
        size_t dataLen = data.size();
        const uint8_t* dataPtr = data.data();
        TimerBegin();
        size_t pos = 0;
        size_t lenAligned = len;
        // NOTE(review): the "if (aligned)" guard is deliberately commented out,
        // so the stride is rounded up to a multiple of 64 in BOTH modes; only
        // the column the result is recorded into (mbps vs mbpsAligned) differs.
        // Presumably intentional so both columns hash identical offsets — confirm.
        //if (aligned)
        lenAligned = (lenAligned + 63) & ~63;
        size_t totalBytes = 0;
        while (pos + len < dataLen)
        {
            // xor into hashsum so the hashing work can't be optimized away
            outResult.hashsum = hasher(dataPtr + pos, len) ^ outResult.hashsum;
            pos += lenAligned;
            totalBytes += len;
        }
        float sec = TimerEnd();
        // MB/s (tiny epsilon avoids division by zero on a too-fast timer)
        float mbps = (float)((totalBytes / 1024.0 / 1024.0) / (sec+1.0e-20f));
        if (index < outResult.mbpsPerLength.size())
        {
            // if we got higher MB/s, use that (i.e. out of all iterations, we pick fastest one)
            assert(outResult.mbpsPerLength[index].length == len);
            if (aligned)
            {
                if (mbps > outResult.mbpsPerLength[index].mbpsAligned)
                    outResult.mbpsPerLength[index].mbpsAligned = mbps;
            }
            else
            {
                if (mbps > outResult.mbpsPerLength[index].mbps)
                    outResult.mbpsPerLength[index].mbps = mbps;
            }
        }
        else
        {
            // add result if no previous iterations did it yet
            // (note: float MB/s is truncated into the int fields)
            Result::PerfResult res;
            res.length = len;
            if (aligned)
                res.mbpsAligned = mbps;
            else
                res.mbps = mbps;
            outResult.mbpsPerLength.push_back(res);
        }
    }
}
// ------------------------------------------------------------------------------------
// Individual hash functions for use in the testing code above
// xxHash family (XXH32 / XXH64 / XXH3), all seeded with 0x1234.
struct HasherXXH32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return XXH32(data, size, 0x1234);
    }
};
// 64-bit xxHash with the result truncated down to 32 bits.
struct HasherXXH64_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return (HashType)XXH64(data, size, 0x1234);
    }
};
struct HasherXXH64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return XXH64(data, size, 0x1234);
    }
};
struct HasherXXH3_64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return XXH3_64bits_withSeed(data, size, 0x1234);
    }
};
struct HasherXXH3_128 : public Hasher128Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        const XXH128_hash_t r = XXH3_128bits_withSeed(data, size, 0x1234);
        return myuint128_t(r.low64, r.high64);
    }
};
#if MEOW_AVAILABLE
// Meow hash (AES-NI based, x64 only); full 128-bit result.
struct HasherMeow_128 : public Hasher128Bit
{
    HashType operator()(const void* data, size_t size) const { meow_u128 h = MeowHash(MeowDefaultSeed, size, (void*)data); return myuint128_t(MeowU64From(h, 0), MeowU64From(h, 1)); }
};
// Meow hash truncated to the low 64 bits.
struct HasherMeow_64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const { meow_u128 h = MeowHash(MeowDefaultSeed, size, (void*)data); return MeowU64From(h, 0); }
};
#endif
// t1ha2 128-bit variant; the high 64 bits come back through the out-parameter.
struct HasherT1HA2_128 : public Hasher128Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        myuint128_t r;
        r.a = t1ha2_atonce128(&r.b, data, size, 0x1234);
        return r;
    }
};
struct HasherT1HA2_64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return t1ha2_atonce(data, size, 0x1234);
    }
};
// SpookyHash V2; Hash128 passes the two seed halves in/out through pointers.
struct HasherSpookyV2_128 : public Hasher128Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        myuint128_t r;
        r.a = 0x1234;
        r.b = 0;
        SpookyHash::Hash128(data, (int)size, &r.a, &r.b);
        return r;
    }
};
struct HasherSpookyV2_64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return SpookyHash::Hash64(data, (int)size, 0x1234);
    }
};
// MurmurHash2A and MurmurHash3 (32-bit x86 and 128-bit x64 variants), seed 0x1234.
struct HasherMurmur2A : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return MurmurHash2A(data, (int)size, 0x1234);
    }
};
struct HasherMurmur3_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        HashType res;
        MurmurHash3_x86_32(data, (int)size, 0x1234, &res);
        return res;
    }
};
struct HasherMurmur3_x64_128 : public Hasher128Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        myuint128_t r;
        MurmurHash3_x64_128(data, (int)size, 0x1234, &r);
        return r;
    }
};
// MUM hash; the 32-bit variant just truncates the 64-bit result.
struct HasherMum_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return (uint32_t)mum_hash(data, size, 0x1234);
    }
};
struct HasherMum : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return mum_hash(data, size, 0x1234);
    }
};
// CityHash; note these take no seed parameter.
struct HasherCity32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return CityHash32((const char*)data, size);
    }
};
struct HasherCity64_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return (uint32_t)CityHash64((const char*)data, size);
    }
};
struct HasherCity64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return CityHash64((const char*)data, size);
    }
};
// FarmHash (lives in namespace util), also unseeded here.
struct HasherFarm32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return util::Hash32((const char*)data, size);
    }
};
struct HasherFarm64_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return (uint32_t)util::Hash64((const char*)data, size);
    }
};
struct HasherFarm64 : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        return util::Hash64((const char*)data, size);
    }
};
// Reference SipHash implementation, https://github.com/veorq/SipHash
// SipHash is keyed (128-bit key) rather than seeded; fixed key used for all tests.
static const uint8_t kSipHashKey[16] = {0x75,0x4E,0x3F,0x38, 0x21,0x0A,0xFE,0x71, 0x9D,0xDC,0x54,0x72, 0x09,0x1A,0xD4,0x79};
// 64-bit SipHash output truncated to 32 bits.
struct HasherSipRef_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const { uint64_t res; siphash((uint8_t*)&res, (const uint8_t*)data, size, kSipHashKey); return (HashType)res; }
};
struct HasherSipRef : public Hasher64Bit
{
    HashType operator()(const void* data, size_t size) const { uint64_t res; siphash((uint8_t*)&res, (const uint8_t*)data, size, kSipHashKey); return res; }
};
// CRC32 checksum; implemented elsewhere in the project (see extern declaration
// near the top of the file). Included as a common baseline.
struct HasherCRC32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        HashType res;
        crc32(data, (int)size, 0x1234, &res);
        return res;
    }
};
// Cryptographic hashes truncated to 32 bits -- included as (slow) baselines.
struct HasherMD5_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        // Could do this on Apple platforms, from <CommonCrypto/CommonDigest.h> -- quick test
        // shows that it's around 25% faster, but that does not change things much.
        //uint32_t res[4]; CC_MD5(data, (unsigned int)size, (unsigned char*)res); return res[0];
        HashType res; md5_32(data, (int)size, 0x1234, &res); return res;
    }
};
struct HasherSHA1_32 : public Hasher32Bit
{
    HashType operator()(const void* data, size_t size) const
    {
        // Could do this on Apple platforms, from <CommonCrypto/CommonDigest.h> -- quick test
        // shows that it's around 25% faster, but that does not change things much.
        //uint32_t res[5]; CC_SHA1(data, (unsigned int)size, (unsigned char*)res); return res[0];
        HashType res; sha1_32a(data, (int)size, 0x1234, &res); return res;
    }
};
// ------------------------------------------------------------------------------------
// Main program
// All loaded data sets; heap allocated in ReadDataSet and never freed
// (acceptable for a run-once test program).
static std::vector<DataSet*> g_DataSets;
// Buffer used by the synthetic throughput tests (filled in CreateSyntheticData).
static std::vector<uint8_t> g_SyntheticData;
// One Result per registered hash, parallel to g_Hashes (same indices).
static std::vector<Result> g_Results;
// Type-erased entry points into the templated test functions above.
typedef void (*TestHashQualityFunc)(const DataSet& dataset, Result::DataSetResult& outResult);
typedef void (*TestHashPerfFunc)(const std::vector<uint8_t>& data, bool aligned, Result& outResult);
// One registered hash function under test.
struct HashToTest
{
    const char* name;
    TestHashQualityFunc qualityFunc;
    TestHashPerfFunc perfFunc;
    bool excludeFromPerf; // true: runs quality tests only, skipped in perf charts
};
static std::vector<HashToTest> g_Hashes;
static void AddHash(const char* name, TestHashQualityFunc qualityFunc, TestHashPerfFunc perfFunc, bool excludeFromPerf)
{
HashToTest h;
h.name = name;
h.qualityFunc = qualityFunc;
h.perfFunc = perfFunc;
h.excludeFromPerf = excludeFromPerf;
g_Hashes.push_back(h);
}
// Fills the synthetic perf-test buffer with a repeating 0..255 byte pattern.
static void CreateSyntheticData()
{
    g_SyntheticData.resize(kSyntheticDataTotalSize);
    uint8_t value = 0;
    for (uint8_t& b : g_SyntheticData)
        b = value++; // wraps at 256, same as truncating the index to a byte
}
// Loads the quality-test corpora. The data sets (per the repository notes):
// - English dictionary words (/usr/share/dict/words style)
// - file relative paths + filenames from several Unity projects/test suites,
//   an imaginary "hashing filenames in an asset database" use case
// - C++ source code (a partial Unity source dump; not released, 6069 entries,
//   43.7MB total, average size 7546.6 bytes)
// - mostly binary data: dumps of actually-hashed data from Unity engine
//   graphics code, i.e. snapshots of internal structs in memory
static void LoadDataSets(const char* folderName)
{
    static const char* const kDataFiles[] =
    {
        "TestData/test-words.txt",
        "TestData/test-filenames.txt",
        "TestData/test-code.txt",
        "TestData/test-binary.bin",
    };
    for (const char* const file : kDataFiles)
    {
        DataSet* data = ReadDataSet(folderName, file);
        if (data)
            g_DataSets.push_back(data);
    }
}
static void PrintResults(float qtime)
{
fprintf(g_OutputFile, "**** Quality evaluation (took %.2fs)\n", qtime);
for (size_t id = 0; id < g_DataSets.size(); ++id)
{
const DataSet& data = *g_DataSets[id];
fprintf(g_OutputFile, "%s, %i entries, %.1f MB size, avg length %.1f\n", data.name.c_str(), (int)data.entries.size(), data.totalSize / 1024.0 / 1024.0, double(data.totalSize) / data.entries.size());
fprintf(g_OutputFile, "HashAlgorithm Colis HTColsIncrease hashsum\n");
for (size_t ia = 0; ia < g_Results.size(); ++ia)
{
const Result::DataSetResult& res = g_Results[ia].datasets[id];
fprintf(g_OutputFile, "%15s %4i %6i %016" PRIx64 "\n", g_Results[ia].name.c_str(), res.collisions, (int)res.hashtabCollisionsIncrease, res.hashsum);
}
}
fprintf(g_OutputFile, "\n**** Performance evaluation, MB/s\n");
fprintf(g_OutputFile, "DataSize,");
for (size_t ia = 0; ia < g_Hashes.size(); ++ia)
{
if (g_Hashes[ia].excludeFromPerf)
continue;
fprintf(g_OutputFile, "%s,", g_Hashes[ia].name);
}
fprintf(g_OutputFile, "\n");
for (size_t is = 0; is < g_Results[0].mbpsPerLength.size(); ++is)
{
fprintf(g_OutputFile, "%i,", g_Results[0].mbpsPerLength[is].length);
for (size_t ia = 0; ia < g_Results.size(); ++ia)
{
if (g_Hashes[ia].excludeFromPerf)
continue;
fprintf(g_OutputFile, "%i,", (int)g_Results[ia].mbpsPerLength[is].mbps);
}
fprintf(g_OutputFile, "\n");
}
fprintf(g_OutputFile, "\n");
fprintf(g_OutputFile, "\n**** Aligned data performance evaluation, MB/s\n");
fprintf(g_OutputFile, "DataSize,");
for (size_t ia = 0; ia < g_Hashes.size(); ++ia)
{
if (g_Hashes[ia].excludeFromPerf)
continue;
fprintf(g_OutputFile, "%s,", g_Hashes[ia].name);
}
fprintf(g_OutputFile, "\n");
for (size_t is = 0; is < g_Results[0].mbpsPerLength.size(); ++is)
{
fprintf(g_OutputFile, "%i,", g_Results[0].mbpsPerLength[is].length);
for (size_t ia = 0; ia < g_Results.size(); ++ia)
{
if (g_Hashes[ia].excludeFromPerf)
continue;
fprintf(g_OutputFile, "%i,", (int)g_Results[ia].mbpsPerLength[is].mbpsAligned);
}
fprintf(g_OutputFile, "\n");
}
fprintf(g_OutputFile, "\n");
}
// Main test driver: loads data sets, registers the hash functions to test,
// runs the quality evaluation on real-world data and the throughput evaluation
// on synthetic data, then prints everything to g_OutputFile.
// extern "C" so the platform-specific entry points (iOS/XB1/Android) can call it.
extern "C" void HashFunctionsTestEntryPoint(const char* folderName)
{
    // load data
    fprintf(g_OutputFile, "Loading data\n");
    CreateSyntheticData();
    LoadDataSets(folderName);
    g_Results.reserve(50);
    // setup hash functions to test; the last ADDHASH argument excludes the hash
    // from the performance charts (it still runs the quality tests)
    #define ADDHASH(name,clazz,exclude) AddHash(name, TestQualityOnDataSet<clazz>, TestPerformancePerLength<clazz>, exclude)
    #if MEOW_AVAILABLE
    ADDHASH("Meow-128", HasherMeow_128, 0);
    #endif
    ADDHASH("XXH3-128", HasherXXH3_128, 0);
    ADDHASH("t1ha2-128", HasherT1HA2_128, 0);
    ADDHASH("SpookyV2-128", HasherSpookyV2_128, 0);
    // NOTE: the full hash list below is currently disabled; only the 128-bit
    // hashes above take part in this run.
    /*
    #if MEOW_AVAILABLE
    ADDHASH("Meow-64", HasherMeow_64, 0);
    #endif
    ADDHASH("XXH3-64", HasherXXH3_64, 0);
    ADDHASH("xxHash64", HasherXXH64, 0);
    ADDHASH("xxHash64-32", HasherXXH64_32, 1);
    ADDHASH("t1ha2-64", HasherT1HA2_64, 0);
    ADDHASH("SpookyV2-64", HasherSpookyV2_64, 0);
    ADDHASH("City64", HasherCity64, 0);
    ADDHASH("City64-32", HasherCity64_32, 1);
    ADDHASH("Mum", HasherMum, 0);
    ADDHASH("Farm64", HasherFarm64, 0);
    ADDHASH("Farm64-32", HasherFarm64_32, 1);
    ADDHASH("xxHash32", HasherXXH32, 0);
    ADDHASH("Murmur3-X64-128", HasherMurmur3_x64_128, 0);
    ADDHASH("Murmur2A", HasherMurmur2A, 1);
    ADDHASH("Murmur3-32", HasherMurmur3_32, 1);
    ADDHASH("Mum-32", HasherMum_32, 1);
    ADDHASH("City32", HasherCity32, 1);
    ADDHASH("Farm32", HasherFarm32, 1);
    ADDHASH("SipRef", HasherSipRef, 1);
    ADDHASH("SipRef-32", HasherSipRef_32, 1);
    ADDHASH("CRC32", HasherCRC32, 1);
    ADDHASH("MD5-32", HasherMD5_32, 1);
    ADDHASH("SHA1-32", HasherSHA1_32, 1);
    ADDHASH("FNV-1amod", FNV1aModifiedHash, 1);
    ADDHASH("djb2", djb2_hash, 1);
    */
    #undef ADDHASH
    // do quality evaluations on all hash functions
    fprintf(g_OutputFile, "Doing quality evals...\n ");
    TimerBegin();
    for (size_t i = 0; i < g_Hashes.size(); ++i)
    {
        const HashToTest& hash = g_Hashes[i];
        g_Results.push_back(Result());
        Result& res = g_Results.back();
        res.name = hash.name;
        res.datasets.resize(g_DataSets.size());
        fprintf(g_OutputFile, "%s ", hash.name);
        fflush(g_OutputFile); // show progress immediately; quality evals are slow
        for (size_t id = 0, nd = g_DataSets.size(); id != nd; ++id)
        {
            hash.qualityFunc(*g_DataSets[id], res.datasets[id]);
        }
    }
    fprintf(g_OutputFile, "\n");
    float qtime = TimerEnd();
    // Do performance evaluations on all hash functions.
    // Perform several iterations: for (iterations) { for (hashes) { DoPerfTest } }.
    // Iterations are performed in the outer loop, so that any clock changes affect
    // all hash functions in a fair way.
    fprintf(g_OutputFile, "Doing performance evals...\n");
    for (int iter = 0; iter < kSyntheticDataIterations; ++iter)
    {
        fprintf(g_OutputFile, "  iter %i/%i\n", iter+1, kSyntheticDataIterations);
        for (size_t i = 0; i < g_Hashes.size(); ++i)
        {
            const HashToTest& hash = g_Hashes[i];
            if (hash.excludeFromPerf)
                continue;
            Result& res = g_Results[i];
            hash.perfFunc(g_SyntheticData, false, res);
        }
    }
    // same again, recording into the "aligned" column of the results
    fprintf(g_OutputFile, "  aligned data...\n");
    for (int iter = 0; iter < kSyntheticDataIterations; ++iter)
    {
        fprintf(g_OutputFile, "  iter %i/%i\n", iter+1, kSyntheticDataIterations);
        for (size_t i = 0; i < g_Hashes.size(); ++i)
        {
            const HashToTest& hash = g_Hashes[i];
            if (hash.excludeFromPerf)
                continue;
            Result& res = g_Results[i];
            hash.perfFunc(g_SyntheticData, true, res);
        }
    }
    // print results
    PrintResults(qtime);
}
// iOS & XB1 has main entry points elsewhere
#if !PLATFORM_IOS && !PLATFORM_XBOXONE && !PLATFORM_ANDROID
// Desktop/PS4 entry point: data files are read relative to folderName.
int main()
{
#if PLATFORM_PS4
    // PS4 reads the bundled files from the /app0/ mount point
    const char* folderName = "/app0/";
#else
    // everywhere else: current working directory
    const char* folderName = "";
#endif
    HashFunctionsTestEntryPoint(folderName);
    return 0;
}
#endif // #if !PLATFORM_IOS && !PLATFORM_XBOXONE && !PLATFORM_ANDROID
// Android main entry points
#if PLATFORM_ANDROID
// Android app lifecycle callback: once the window is up, run the whole test
// suite, writing results to a file on sdcard, then finish the activity.
void handleAppCommand(android_app * app, int32_t cmd)
{
    if (cmd != APP_CMD_INIT_WINDOW)
        return;
    // Application startup
    LOGI("Starting");
    std::string appPath = "/sdcard/hashtest_android_results.txt";
    g_OutputFile = fopen(appPath.c_str(), "w+");
    HashFunctionsTestEntryPoint("");
    fflush(g_OutputFile);
    fclose(g_OutputFile);
    LOGI("Finished");
    ANativeActivity_finish(g_AndroidApp->activity);
}
// Android entry point: pumps the looper until the app is destroyed; the actual
// test run is kicked off from handleAppCommand on APP_CMD_INIT_WINDOW.
void android_main(android_app* state)
{
    app_dummy();
    g_AndroidApp = state;
    state->onAppCmd = handleAppCommand;
    bool destroy = false;
    while(1)
    {
        int ident, events;
        struct android_poll_source* source;
        // drain all pending events without blocking (timeout 0)
        while ((ident = ALooper_pollAll(0, NULL, &events, (void**)&source)) >= 0)
        {
            if (source != NULL)
            {
                source->process(g_AndroidApp, source);
            }
            if (g_AndroidApp->destroyRequested != 0)
            {
                destroy = true;
                break;
            }
        }
        if (destroy)
            break;
    }
}
#endif // #if PLATFORM_ANDROID