/
crc32c.cc
393 lines (340 loc) · 12.7 KB
/
crc32c.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
* Copyright (C) 2013 Mark Adler
* Version 1.1 1 Aug 2013 Mark Adler
*/
/*
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler
madler@alumni.caltech.edu
*/
//
// Software and Hardware Assisted CRC32-C function.
//
// This is an altered/adapted version of Mark Adler's crc32c.c
// - See http://stackoverflow.com/a/17646775
// - See above license.
// - This module provides a software only crc32c
// and cpuid code to enable HW assist.
//
// Changes from the original version include.
// a) Compiler intrinsics instead of inline asm.
// b) Some re-styling, commenting and code style safety.
// i) no if or loops without braces.
// ii) variable initialisation.
// c) GCC/CLANG/MSVC safe.
// d) C++ casting and limits.
// e) Benchmarked and tuned.
// i) The 3way optimised version is slower for data sizes < 3xSHORT_BLOCK
// so fall back to a SHORT_BLOCK only mode or a single issue version.
// ii) See crc32c_bench.cc for testing
// f) Validated with IETF test vectors.
// i) See crc32c_test.cc.
// g) Use of GCC4.8 attributes to select SSE4.2 vs SW version.
// h) Custom cpuid code works for GCC(<4.8), CLANG and MSVC.
// i) Use static initialisation instead of pthread_once.
//
#include "crc32c_private.h"
#include <platform/crc32c.h>

#include <folly/CpuId.h>

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>

#if defined(__linux__)
#include <sys/auxv.h>
#endif
// Builds every lookup table below; declared here so the static initialiser
// of 'tables_setup' can run it before main() (replaces pthread_once).
static bool setup_tables();
static bool tables_setup = setup_tables();

// CRC-32C (Castagnoli) polynomial in reversed (LSB-first) bit order.
const uint32_t CRC32C_POLYNOMIAL_REV = 0x82F63B78;

// Software slicing-by-8 table: 8 slices x 256 byte values.
const int TABLE_X = 8, TABLE_Y = 256;
static uint32_t crc32c_sw_lookup_table[TABLE_X][TABLE_Y];

/* Tables for hardware crc that shift a crc by LONG and SHORT zeros. */
uint32_t crc32c_long[SHIFT_TABLE_X][SHIFT_TABLE_Y];
uint32_t crc32c_short[SHIFT_TABLE_X][SHIFT_TABLE_Y];
/* Multiply the GF(2) matrix 'mat' by the bit-vector 'vec'. Each matrix
   column is one uint32_t; bit n of vec selects column mat[n]. mat must have
   at least as many entries as the index of vec's most significant set bit.
   Returns the XOR (GF(2) sum) of the selected columns. */
static inline uint32_t gf2_matrix_times(const uint32_t *mat, uint32_t vec) {
    uint32_t product = 0;
    for (; vec != 0; vec >>= 1, mat++) {
        if ((vec & 1) != 0) {
            product ^= *mat;
        }
    }
    return product;
}
/* Square the 32x32 GF(2) matrix 'mat', writing the result into 'square'.
   Squaring composes the zeros operator with itself, doubling the number of
   zero bits it applies. Both arrays must have 32 rows. */
static inline void gf2_matrix_square(uint32_t *square, const uint32_t *mat) {
    for (int row = 0; row < 32; row++) {
        square[row] = gf2_matrix_times(mat, mat[row]);
    }
}
/* Construct an operator to apply len zeros to a crc. len must be a power of
   two. If len is not a power of two, then the result is the same as for the
   largest power of two less than len. The result for len == 0 is the same as
   for len == 1. A version of this routine could be easily written for any
   len, but that is not needed for this application.

   The operator is a 32x32 GF(2) matrix stored as 32 uint32_t columns in
   'even'. Repeated squaring walks up powers of two: each square doubles the
   number of zero bits the operator appends. */
static void crc32c_zeros_op(uint32_t *even, size_t len) {
    int n = 0;
    uint32_t row = 1;
    uint32_t odd[32]; /* odd-power-of-two zeros operator */

    /* put operator for one zero bit in odd: column 0 is the polynomial
       (the effect of shifting out a set low bit), columns 1..31 are a
       one-bit shift of the remaining CRC bits */
    odd[0] = CRC32C_POLYNOMIAL_REV; /* CRC-32C polynomial */
    for (n = 1; n < 32; n++) {
        odd[n] = row;
        row <<= 1;
    }

    /* put operator for two zero bits in even */
    gf2_matrix_square(even, odd);

    /* put operator for four zero bits in odd */
    gf2_matrix_square(odd, even);

    /* first square will put the operator for one zero byte (eight zero bits),
       in even -- but square puts operator for two zero bytes in odd, and so
       on, until len has been rotated down to zero. The operators ping-pong
       between 'even' and 'odd', hence the early return when len runs out
       after an even-numbered squaring. */
    do {
        gf2_matrix_square(even, odd);
        len >>= 1;
        /* answer is already in 'even' -- nothing left to copy */
        if (len == 0) {
            return;
        }
        gf2_matrix_square(odd, even);
        len >>= 1;
    } while (len > 0);

    /* answer ended up in odd -- copy to even */
    for (n = 0; n < 32; n++) {
        even[n] = odd[n];
    }
}
/* Take a length and build four lookup tables for applying the zeros operator
   for that length, byte-by-byte on the operand: table 'part' maps byte
   'part' of a 32-bit CRC through the operator, so a full application is
   four lookups XORed together (see crc32c_shift). */
static void crc32c_zeros(uint32_t zeros[SHIFT_TABLE_X][SHIFT_TABLE_Y], size_t len) {
    uint32_t op[32];
    crc32c_zeros_op(op, len);
    for (uint32_t byte = 0; byte < 256; byte++) {
        for (int part = 0; part < 4; part++) {
            zeros[part][byte] = gf2_matrix_times(op, byte << (8 * part));
        }
    }
}
// single CRC in software
//
// Folds the next 8 bytes of 'buffer' into the running CRC using the
// slicing-by-8 lookup tables (one table per byte of the 64-bit chunk).
// Callers align buf to 8 bytes before entering their 64-bit loops.
// NOTE(review): the slice ordering assumes a little-endian 64-bit load,
// as in the original Adler implementation -- confirm before porting to a
// big-endian target.
static inline uint64_t crc32c_sw_inner(uint64_t crc, const uint8_t* buffer) {
    // Load via memcpy rather than '*reinterpret_cast<const uint64_t*>':
    // dereferencing a uint8_t* cast to uint64_t* violates strict aliasing
    // (undefined behaviour). memcpy is well-defined and optimising
    // compilers emit the same single 64-bit load for it.
    uint64_t chunk;
    memcpy(&chunk, buffer, sizeof(chunk));
    crc ^= chunk;
    crc = crc32c_sw_lookup_table[7][crc & 0xff] ^
          crc32c_sw_lookup_table[6][(crc >> 8) & 0xff] ^
          crc32c_sw_lookup_table[5][(crc >> 16) & 0xff] ^
          crc32c_sw_lookup_table[4][(crc >> 24) & 0xff] ^
          crc32c_sw_lookup_table[3][(crc >> 32) & 0xff] ^
          crc32c_sw_lookup_table[2][(crc >> 40) & 0xff] ^
          crc32c_sw_lookup_table[1][(crc >> 48) & 0xff] ^
          crc32c_sw_lookup_table[0][crc >> 56];
    return crc;
}
//
// CRC32-C implementation using software
// No optimisation
//
uint32_t crc32c_sw_1way(const uint8_t* buf, size_t len, uint32_t crc_in) {
    // Standard CRC form: invert on entry, invert again on exit.
    uint64_t crc = static_cast<uint64_t>(~crc_in);

    // Byte-at-a-time until the pointer is 64-bit aligned
    // (crc32c_sw_inner performs 64-bit loads).
    while (len > 0 && (reinterpret_cast<uintptr_t>(buf) & ALIGN64_MASK) != 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        ++buf;
        --len;
    }

    // Eight bytes per iteration via slicing-by-8.
    for (; len >= sizeof(uint64_t); buf += sizeof(uint64_t)) {
        crc = crc32c_sw_inner(crc, buf);
        len -= sizeof(uint64_t);
    }

    // Byte-at-a-time for any tail.
    while (len > 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        ++buf;
        --len;
    }

    return static_cast<uint32_t>(crc) ^ std::numeric_limits<uint32_t>::max();
}
//
// Partially optimised CRC32C which divides the data into 3 blocks
// allowing some free CPU pipelining/parallelisation.
//
// Three independent CRCs (crc, crc1, crc2) are computed over three adjacent
// SHORT_BLOCK-sized regions in the same loop; since they carry no data
// dependency on each other, the CPU can overlap the table lookups. The
// partial CRCs are then merged with crc32c_shift(), which applies the
// precomputed "append SHORT_BLOCK zero bytes" operator.
//
uint32_t crc32c_sw_short_block(const uint8_t* buf, size_t len, uint32_t crc_in) {
    // If len is less than 3 x SHORT_BLOCK just use the 1-way sw version
    if (len < (3 * SHORT_BLOCK)) {
        return crc32c_sw_1way(buf, len, crc_in);
    }

    // Standard CRC form: invert incoming CRC, invert again before returning.
    auto crc_flipped = ~crc_in;
    uint64_t crc = static_cast<uint64_t>(crc_flipped), crc1 = 0, crc2 = 0;

    // Byte-at-a-time until buf is 64-bit aligned
    // (crc32c_sw_inner performs 64-bit loads).
    while ((reinterpret_cast<uintptr_t>(buf) & ALIGN64_MASK) != 0 && len > 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        buf += sizeof(uint8_t);
        len -= sizeof(uint8_t);
    }

    // process the data in 3 blocks and combine the crc's using the shift trick
    while (len >= (3 * SHORT_BLOCK)) {
        crc1 = 0;
        crc2 = 0;
        const uint8_t* end = buf + SHORT_BLOCK;
        do
        {
            // One 8-byte step in each of the three interleaved blocks.
            crc = crc32c_sw_inner(crc, buf);
            crc1 = crc32c_sw_inner(crc1, (buf + SHORT_BLOCK));
            crc2 = crc32c_sw_inner(crc2, (buf + (2 * SHORT_BLOCK)));
            buf += sizeof(uint64_t);
        } while (buf < end);
        // Shift crc past one SHORT_BLOCK of zeros and fold in crc1, then
        // repeat for crc2 -- equivalent to having processed serially.
        crc = crc32c_shift(crc32c_short, static_cast<uint32_t>(crc)) ^ crc1;
        crc = crc32c_shift(crc32c_short, static_cast<uint32_t>(crc)) ^ crc2;
        // The inner loop advanced buf through block 0 only; skip blocks 1+2.
        buf += 2 * SHORT_BLOCK;
        len -= 3 * SHORT_BLOCK;
    }

    // swallow any remaining longs.
    while (len >= sizeof(uint64_t)) {
        crc = crc32c_sw_inner(crc, buf);
        buf += sizeof(uint64_t);
        len -= sizeof(uint64_t);
    }

    // swallow the remaining bytes.
    while (len > 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        buf += sizeof(uint8_t);
        len -= sizeof(uint8_t);
    }

    return static_cast<uint32_t>(crc ^ std::numeric_limits<uint32_t>::max());
}
//
// CRC32-C software implementation.
//
// Fully optimised 3-way version: the bulk of the data is processed as three
// interleaved LONG_BLOCK streams, then three SHORT_BLOCK streams, merging
// the partial CRCs via the precomputed crc32c_long/crc32c_short shift
// tables. Falls back to the short-block-only version for smaller inputs
// (measured faster -- see header comment note (e)).
//
uint32_t crc32c_sw (const uint8_t* buf, size_t len, uint32_t crc_in) {
    // If len is less than the 3 x LONG_BLOCK it's faster to use the short-block only.
    if (len < (3 * LONG_BLOCK)) {
        return crc32c_sw_short_block(buf, len, crc_in);
    }

    // Standard CRC form: invert incoming CRC, invert again before returning.
    auto crc_flipped = ~crc_in;
    uint64_t crc = static_cast<uint64_t>(crc_flipped), crc1 = 0, crc2 = 0;

    // Byte-at-a-time until buf is 64-bit aligned
    // (crc32c_sw_inner performs 64-bit loads).
    while ((reinterpret_cast<uintptr_t>(buf) & ALIGN64_MASK) != 0 && len > 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        buf += sizeof(uint8_t);
        len -= sizeof(uint8_t);
    }

    // process the data in 3 blocks and combine the crc's using the shift trick
    while (len >= (3 * LONG_BLOCK)) {
        crc1 = 0;
        crc2 = 0;
        const uint8_t* end = buf + LONG_BLOCK;
        do
        {
            // One 8-byte step in each of the three interleaved blocks.
            crc = crc32c_sw_inner(crc, buf);
            crc1 = crc32c_sw_inner(crc1, (buf + LONG_BLOCK));
            crc2 = crc32c_sw_inner(crc2, (buf + (2 * LONG_BLOCK)));
            buf += sizeof(uint64_t);
        } while (buf < end);
        // Shift crc past LONG_BLOCK zeros and fold in crc1, then crc2 --
        // equivalent to having processed the three blocks serially.
        crc = crc32c_shift(crc32c_long, static_cast<uint32_t>(crc)) ^ crc1;
        crc = crc32c_shift(crc32c_long, static_cast<uint32_t>(crc)) ^ crc2;
        // The inner loop advanced buf through block 0 only; skip blocks 1+2.
        buf += 2 * LONG_BLOCK;
        len -= 3 * LONG_BLOCK;
    }

    // Same 3-way scheme at SHORT_BLOCK granularity for the remainder.
    while (len >= (3 * SHORT_BLOCK)) {
        crc1 = 0;
        crc2 = 0;
        const uint8_t* end = buf + SHORT_BLOCK;
        do
        {
            crc = crc32c_sw_inner(crc, buf);
            crc1 = crc32c_sw_inner(crc1, (buf + SHORT_BLOCK));
            crc2 = crc32c_sw_inner(crc2, (buf + (2 * SHORT_BLOCK)));
            buf += sizeof(uint64_t);
        } while (buf < end);
        crc = crc32c_shift(crc32c_short, static_cast<uint32_t>(crc)) ^ crc1;
        crc = crc32c_shift(crc32c_short, static_cast<uint32_t>(crc)) ^ crc2;
        buf += 2 * SHORT_BLOCK;
        len -= 3 * SHORT_BLOCK;
    }

    // swallow any remaining longs.
    while (len >= sizeof(uint64_t)) {
        crc = crc32c_sw_inner(crc, buf);
        buf += sizeof(uint64_t);
        len -= sizeof(uint64_t);
    }

    // swallow any remaining bytes.
    while (len > 0) {
        crc = crc32c_sw_lookup_table[0][(crc ^ *buf) & 0xff] ^ (crc >> 8);
        buf += sizeof(uint8_t);
        len -= sizeof(uint8_t);
    }

    return static_cast<uint32_t>(crc ^ std::numeric_limits<uint32_t>::max());
}
//
// Initialise tables for software and hardware functions.
//
// Runs exactly once, via the static initialisation of 'tables_setup'.
//
bool setup_tables() {
    // Slice 0: classic bit-at-a-time CRC-32C table for single bytes.
    for (int byte = 0; byte < TABLE_Y; byte++) {
        uint32_t crc = static_cast<uint32_t>(byte);
        for (int bit = 0; bit < TABLE_X; bit++) {
            if ((crc & 1) != 0) {
                crc = (crc >> 1) ^ CRC32C_POLYNOMIAL_REV;
            } else {
                crc >>= 1;
            }
        }
        crc32c_sw_lookup_table[0][byte] = crc;
    }

    // Slices 1..7: extend slice 0 so eight input bytes can be folded in
    // per step (slicing-by-8); slice k shifts the CRC by k extra bytes.
    for (int byte = 0; byte < TABLE_Y; byte++) {
        uint32_t crc = crc32c_sw_lookup_table[0][byte];
        for (int slice = 1; slice < TABLE_X; slice++) {
            crc = crc32c_sw_lookup_table[0][crc & 0xff] ^ (crc >> 8);
            crc32c_sw_lookup_table[slice][byte] = crc;
        }
    }

    // Zero-shift operators used to combine CRCs of adjacent blocks.
    crc32c_zeros(crc32c_long, LONG_BLOCK);
    crc32c_zeros(crc32c_short, SHORT_BLOCK);

    // Reference tables_setup so its static initialisation isn't elided.
    (void) tables_setup;
    return true;
}
// Common signature of the software and (optional) hardware implementations;
// allows one to be selected at static-initialisation time (see safe_crc32c).
typedef uint32_t (*crc32c_function)(const uint8_t* buf,
                                    size_t len,
                                    uint32_t crc_in);
//
// Return the appropriate function for the platform.
// - x86-64: If SSE4.2 is available then hardware acceleration is
//   used.
// - AArch64: If CRC32 instructions are available then hardware
//   acceleration is used.
//
crc32c_function setup_crc32c() {
#if CB_CRC32_HW_SUPPORTED
#if FOLLY_X64
    // x86-64: the crc32 instruction is part of SSE4.2.
    return folly::CpuId().sse42() ? crc32c_hw : crc32c_sw;
#elif FOLLY_AARCH64
    // AArch64/Linux: query the kernel's hardware-capability bits for the
    // optional CRC32 instructions extension.
    unsigned long features = getauxval(AT_HWCAP);
    return (features & HWCAP_CRC32) ? crc32c_hw : crc32c_sw;
#else
    // AArch64, non-linux - TODO.
#error Unhandled OS for AArch64.
#endif
#else
    // No hardware support compiled in: always use the software version.
    return crc32c_sw;
#endif
}
// Chosen once at static-initialisation time: the fastest implementation
// that is safe for this CPU.
static crc32c_function safe_crc32c = setup_crc32c();
//
// The exported crc32c method dispatches to the function setup_crc32c()
// decided is safe for the platform.
//
uint32_t crc32c (const uint8_t* buf, size_t len, uint32_t crc_in) {
    return safe_crc32c(buf, len, crc_in);
}