Debugging blake3
lundman committed Apr 10, 2023
1 parent 72c67e5 commit fc96fcf
Showing 1 changed file with 23 additions and 21 deletions.
44 changes: 23 additions & 21 deletions module/icp/algs/blake3/blake3.c
@@ -89,7 +89,7 @@ static size_t chunk_state_len(const blake3_chunk_state_t *ctx)
 static size_t chunk_state_fill_buf(blake3_chunk_state_t *ctx,
     const uint8_t *input, size_t input_len)
 {
-	ASSERT3U(BLAKE3_BLOCK_LEN, >=, ((size_t)ctx->buf_len));
+	VERIFY3U(BLAKE3_BLOCK_LEN, >=, ((size_t)ctx->buf_len));
 	size_t take = BLAKE3_BLOCK_LEN - ((size_t)ctx->buf_len);
 	if (take > input_len) {
 		take = input_len;
@@ -149,7 +149,7 @@ static void output_root_bytes(const blake3_ops_t *ops, const output_t *ctx,
 	while (out_len > 0) {
 		ops->compress_xof(ctx->input_cv, ctx->block, ctx->block_len,
 		    output_block_counter, ctx->flags | ROOT, wide_buf);
-		ASSERT3U(64, >=, offset_within_block);
+		VERIFY3U(64, >=, offset_within_block);
 		size_t available_bytes = 64 - offset_within_block;
 		size_t memcpy_len;
 		if (out_len > available_bytes) {
@@ -159,7 +159,7 @@
 		}
 		memcpy(out, wide_buf + offset_within_block, memcpy_len);
 		out += memcpy_len;
-		ASSERT3U(out_len, >=, memcpy_len);
+		VERIFY3U(out_len, >=, memcpy_len);
 		out_len -= memcpy_len;
 		output_block_counter += 1;
 		offset_within_block = 0;
@@ -172,7 +172,7 @@ static void chunk_state_update(const blake3_ops_t *ops,
 	if (ctx->buf_len > 0) {
 		size_t take = chunk_state_fill_buf(ctx, input, input_len);
 		input += take;
-		ASSERT3U(input_len, >=, take);
+		VERIFY3U(input_len, >=, take);
 		input_len -= take;
 		if (input_len > 0) {
 			ops->compress_in_place(ctx->cv, ctx->buf,
@@ -190,7 +190,7 @@ static void chunk_state_update(const blake3_ops_t *ops,
 		    ctx->flags|chunk_state_maybe_start_flag(ctx));
 		ctx->blocks_compressed += 1;
 		input += BLAKE3_BLOCK_LEN;
-		ASSERT3U(input_len, >=, BLAKE3_BLOCK_LEN);
+		VERIFY3U(input_len, >=, BLAKE3_BLOCK_LEN);
 		input_len -= BLAKE3_BLOCK_LEN;
 	}

@@ -223,7 +223,7 @@ static size_t left_len(size_t content_len)
 	 * content_len
 	 * should always be greater than BLAKE3_CHUNK_LEN.
 	 */
-	ASSERT3U(content_len, >=, 1);
+	VERIFY3U(content_len, >=, 1);
 	size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
 	return (round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN);
 }
@@ -242,7 +242,7 @@ static size_t compress_chunks_parallel(const blake3_ops_t *ops,
 	size_t input_position = 0;
 	size_t chunks_array_len = 0;
 	while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
-		ASSERT3U(input_len, >=, input_position);
+		VERIFY3U(input_len, >=, input_position);
 		chunks_array[chunks_array_len] = &input[input_position];
 		input_position += BLAKE3_CHUNK_LEN;
 		chunks_array_len += 1;
@@ -261,7 +261,7 @@
 		blake3_chunk_state_t chunk_state;
 		chunk_state_init(&chunk_state, key, flags);
 		chunk_state.chunk_counter = counter;
-		ASSERT3U(input_len, >=, input_position);
+		VERIFY3U(input_len, >=, input_position);
 		chunk_state_update(ops, &chunk_state, &input[input_position],
 		    input_len - input_position);
 		output_t output = chunk_state_output(&chunk_state);
@@ -288,7 +288,7 @@ static size_t compress_parents_parallel(const blake3_ops_t *ops,
 	size_t parents_array_len = 0;
 
 	while (num_chaining_values - (2 * parents_array_len) >= 2) {
-		ASSERT3U(num_chaining_values, >=, (2 * parents_array_len));
+		VERIFY3U(num_chaining_values, >=, (2 * parents_array_len));
 		parents_array[parents_array_len] = &child_chaining_values[2 *
 		    parents_array_len * BLAKE3_OUT_LEN];
 		parents_array_len += 1;
@@ -351,7 +351,7 @@ static size_t blake3_compress_subtree_wide(const blake3_ops_t *ops,
 	 * strategy.)
 	 */
 	size_t left_input_len = left_len(input_len);
-	ASSERT3U(input_len, >=, left_input_len);
+	VERIFY3U(input_len, >=, left_input_len);
 	size_t right_input_len = input_len - left_input_len;
 	const uint8_t *right_input = &input[left_input_len];
 	uint64_t right_chunk_counter = chunk_counter +
@@ -462,7 +462,7 @@ static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
 {
 	size_t post_merge_stack_len = (size_t)popcnt(total_len);
 	while (ctx->cv_stack_len > post_merge_stack_len) {
-		ASSERT3U(ctx->cv_stack_len, >=, 2);
+		VERIFY3U(ctx->cv_stack_len, >=, 2 /* A */);
 		uint8_t *parent_node =
 		    &ctx->cv_stack[(ctx->cv_stack_len - 2) * BLAKE3_OUT_LEN];
 		output_t output =
@@ -481,13 +481,14 @@ static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
 		{
 			uint32_t cv_words[8];
 
-			memcpy(cv_words, (&output)->input_cv, 32);
+			VERIFY3U(output.block_len, >=, 0);
+			memcpy(cv_words, output.input_cv, 32);
 			((const blake3_ops_t *)ctx->ops)->compress_in_place(
-			    cv_words, (&output)->block, (&output)->block_len,
-			    (&output)->counter, (&output)->flags);
-			store_cv_words((&output), cv_words);
+			    cv_words, output.block, output.block_len,
+			    output.counter, output.flags);
+			store_cv_words(parent_node, cv_words);
 		}
-		ASSERT3U(ctx->cv_stack_len, >=, 1);
+		VERIFY3U(ctx->cv_stack_len, >=, 0);
 #endif
 		ctx->cv_stack_len -= 1;
 	}
@@ -576,7 +577,7 @@ Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
 		}
 		chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take);
 		input_bytes += take;
-		ASSERT3U(input_len, >=, take);
+		VERIFY3U(input_len, >=, take);
 		input_len -= take;
 		/*
 		 * If we've filled the current chunk and there's more coming,
@@ -636,7 +637,7 @@ Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
 		 * https://github.com/BLAKE3-team/BLAKE3/issues/69.
 		 */
 		while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
-			ASSERT3U(subtree_len, >=, 1);
+			VERIFY3U(subtree_len, >=, 1);
 			subtree_len /= 2;
 		}
 		/*
@@ -672,7 +673,7 @@ Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
 		}
 		ctx->chunk.chunk_counter += subtree_chunks;
 		input_bytes += subtree_len;
-		ASSERT3U(input_len, >=, subtree_len);
+		VERIFY3U(input_len, >=, subtree_len);
 		input_len -= subtree_len;
 	}

@@ -704,7 +705,7 @@ Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t todo)
 		size_t block = (todo >= block_max) ? block_max : todo;
 		Blake3_Update2(ctx, data + done, block);
 		done += block;
-		ASSERT3U(todo, >=, block);
+		VERIFY3U(todo, >=, block);
 		todo -= block;
 	}
 }
@@ -755,7 +756,8 @@ Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
 		output = chunk_state_output(&ctx->chunk);
 	} else {
 		/* There are always at least 2 CVs in the stack in this case. */
-		ASSERT3U(ctx->cv_stack_len, >=, 2);
+		VERIFY3U(ctx->cv_stack_len, !=, 1 /* B */);
+		VERIFY3U(ctx->cv_stack_len, >=, 2 /* B */);
 		cvs_remaining = ctx->cv_stack_len - 2;
 		output = parent_output(&ctx->cv_stack[cvs_remaining * 32],
 		    ctx->key, ctx->chunk.flags);
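
Context for the change above: in OpenZFS, the ASSERT3U() family is compiled out of non-debug builds, while VERIFY3U() is checked in every build, so promoting assertions to verifies makes these invariants trap even where debug assertions are disabled. The sketch below only illustrates that distinction; the MY_* macros and the MY_DEBUG flag are hypothetical stand-ins, not the actual SPL definitions.

/*
 * Illustrative only -- simplified stand-ins for the ASSERT3U/VERIFY3U
 * semantics, not the real OpenZFS SPL macros.
 */
#include <stdio.h>
#include <stdlib.h>

/* VERIFY3U-style check: compiled into every build, aborts on failure. */
#define	MY_VERIFY3U(left, op, right)					\
	do {								\
		if (!((unsigned long long)(left) op			\
		    (unsigned long long)(right))) {			\
			(void) fprintf(stderr,				\
			    "VERIFY3U(%s %s %s) failed\n",		\
			    #left, #op, #right);			\
			abort();					\
		}							\
	} while (0)

/* ASSERT3U-style check: a no-op unless the build defines MY_DEBUG. */
#ifdef MY_DEBUG
#define	MY_ASSERT3U(left, op, right)	MY_VERIFY3U(left, op, right)
#else
#define	MY_ASSERT3U(left, op, right)	((void)0)
#endif

int
main(void)
{
	size_t buf_len = 80;	/* deliberately violates the invariant */

	/* Silently skipped when MY_DEBUG is not defined. */
	MY_ASSERT3U(64, >=, buf_len);
	/* Always evaluated: prints a message and aborts here. */
	MY_VERIFY3U(64, >=, buf_len);
	return (0);
}

Built without -DMY_DEBUG, the ASSERT-style line does nothing and only the VERIFY-style line catches the violated invariant, which is the effect this debugging commit relies on.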
