author    Jack O'Connor <[email protected]>  2020-01-21 14:04:59 -0500
committer Jack O'Connor <[email protected]>  2020-01-22 21:32:39 -0500
commit    163f52245d3f6cfcc2ec2c91e01412ffba5f6d45 (patch)
tree      91937cc6aba2bb9d63b83b4d82b431fb9f9b9446
parent    de1cf0038e26b8371408b4cb8b7fc6b4a47659df (diff)
port compress_subtree_to_parent_node from Rust to C
This recursive function performs parallel parent node hashing, which is an important optimization.
-rw-r--r--  c/Makefile                               8
-rw-r--r--  c/blake3.c                             367
-rw-r--r--  c/blake3.h                               7
-rw-r--r--  c/blake3_c_rust_bindings/src/test.rs     7
-rw-r--r--  c/blake3_dispatch.c                     39
-rw-r--r--  c/blake3_impl.h                         29
-rw-r--r--  c/main.c                                 1
7 files changed, 380 insertions, 78 deletions
diff --git a/c/Makefile b/c/Makefile
index cc032c1..780e4ec 100644
--- a/c/Makefile
+++ b/c/Makefile
@@ -1,7 +1,6 @@
NAME=blake3
CC=gcc
CFLAGS=-O3 -Wall -Wextra -std=c11 -pedantic
-BLAKE3_NO_NEON ?= 1
TARGETS=
EXTRAFLAGS=-DBLAKE3_TESTING
@@ -23,9 +22,8 @@ else
TARGETS += blake3_avx512.o
endif
-ifdef BLAKE3_NO_NEON
-EXTRAFLAGS += -DBLAKE3_NO_NEON
-else
+ifdef BLAKE3_USE_NEON
+EXTRAFLAGS += -DBLAKE3_USE_NEON
TARGETS += blake3_neon.o
endif
@@ -44,7 +42,7 @@ blake3_avx512.o: blake3_avx512.c
blake3_neon.o: blake3_neon.c
$(CC) $(CFLAGS) $(EXTRAFLAGS) -c $^ -o $@
-test: CFLAGS += -DBLAKE3_TESTING
+test: CFLAGS += -DBLAKE3_TESTING -fsanitize=address -fsanitize=undefined
test: all
./test.py
diff --git a/c/blake3.c b/c/blake3.c
index 15f6295..17fbc3b 100644
--- a/c/blake3.c
+++ b/c/blake3.c
@@ -1,6 +1,3 @@
-// NB: This is only for benchmarking. The guy who wrote this file hasn't
-// touched C since college. Please don't use this code in production.
-
#include <assert.h>
#include <stdbool.h>
#include <string.h>
@@ -149,6 +146,207 @@ INLINE output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
return make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT);
}
+// Given some input larger than one chunk, return the number of bytes that
+// should go in the left subtree. This is the largest power-of-2 number of
+// chunks that leaves at least 1 byte for the right subtree.
+INLINE size_t left_len(size_t content_len) {
+ // Subtract 1 to reserve at least one byte for the right side. content_len
+ // should always be greater than BLAKE3_CHUNK_LEN.
+ size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
+ return round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN;
+}
+
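
A minimal standalone sketch of the left_len() split, for intuition. It assumes BLAKE3_CHUNK_LEN is 1024 (its value in blake3.h); round_down_to_power_of_2_sketch is a hypothetical stand-in for the helper defined in blake3_impl.h.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint64_t round_down_to_power_of_2_sketch(uint64_t x) {
  while (x & (x - 1)) {
    x &= x - 1; // clear the lowest set bit until one bit remains
  }
  return x;
}

static size_t left_len_sketch(size_t content_len) {
  size_t full_chunks = (content_len - 1) / 1024;
  return (size_t)round_down_to_power_of_2_sketch((uint64_t)full_chunks) * 1024;
}

int main(void) {
  assert(left_len_sketch(1025) == 1024);             // 1 chunk left, 1 byte right
  assert(left_len_sketch(5 * 1024) == 4 * 1024);     // 5 chunks split 4 left / 1 right
  assert(left_len_sketch(3 * 1024 + 1) == 2 * 1024); // 3 chunks + 1 byte: 2 left
  return 0;
}
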
+// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
+// on a single thread. Write out the chunk chaining values and return the
+// number of chunks hashed. These chunks are never the root and never empty;
+// those cases use a different codepath.
+INLINE size_t compress_chunks_parallel(const uint8_t *input, size_t input_len,
+ const uint32_t key[8],
+ uint64_t chunk_counter, uint8_t flags,
+ uint8_t *out) {
+#if defined(BLAKE3_TESTING)
+ assert(0 < input_len);
+ assert(input_len <= MAX_SIMD_DEGREE * BLAKE3_CHUNK_LEN);
+#endif
+
+ const uint8_t *chunks_array[MAX_SIMD_DEGREE];
+ size_t input_position = 0;
+ size_t chunks_array_len = 0;
+ while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
+ chunks_array[chunks_array_len] = &input[input_position];
+ input_position += BLAKE3_CHUNK_LEN;
+ chunks_array_len += 1;
+ }
+
+ blake3_hash_many(chunks_array, chunks_array_len,
+ BLAKE3_CHUNK_LEN / BLAKE3_BLOCK_LEN, key, chunk_counter,
+ true, flags, CHUNK_START, CHUNK_END, out);
+
+ // Hash the remaining partial chunk, if there is one. Note that the empty
+ // chunk (meaning the empty message) is a different codepath.
+ if (input_len > input_position) {
+ uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
+ blake3_chunk_state chunk_state;
+ chunk_state_init(&chunk_state, key, flags);
+ chunk_state.chunk_counter = counter;
+ chunk_state_update(&chunk_state, &input[input_position],
+ input_len - input_position);
+ output_t output = chunk_state_output(&chunk_state);
+ output_chaining_value(&output, &out[chunks_array_len * BLAKE3_OUT_LEN]);
+ return chunks_array_len + 1;
+ } else {
+ return chunks_array_len;
+ }
+}
+
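
For intuition about the return value, here is a hypothetical helper (again assuming a 1024-byte chunk length) that computes how many chaining values this function writes:

// Hypothetical: compress_chunks_parallel() writes one CV per chunk, counting
// a trailing partial chunk, so its return value is a ceiling division.
static size_t chunk_cv_count_sketch(size_t input_len) {
  return (input_len + 1024 - 1) / 1024;
}
// e.g. chunk_cv_count_sketch(2 * 1024 + 1) == 3: two whole chunks hashed via
// blake3_hash_many(), plus one partial chunk via the chunk_state path.
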
+// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
+// on a single thread. Write out the parent chaining values and return the
+// number of parents hashed. (If there's an odd input chaining value left over,
+// return it as an additional output.) These parents are never the root and
+// never empty; those cases use a different codepath.
+INLINE size_t compress_parents_parallel(const uint8_t *child_chaining_values,
+ size_t num_chaining_values,
+ const uint32_t key[8], uint8_t flags,
+ uint8_t *out) {
+#if defined(BLAKE3_TESTING)
+ assert(2 <= num_chaining_values);
+ assert(num_chaining_values <= 2 * MAX_SIMD_DEGREE_OR_2);
+#endif
+
+ const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2];
+ size_t parents_array_len = 0;
+ while (num_chaining_values - (2 * parents_array_len) >= 2) {
+ parents_array[parents_array_len] =
+ &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN];
+ parents_array_len += 1;
+ }
+
+ blake3_hash_many(parents_array, parents_array_len, 1, key,
+ 0, // Parents always use counter 0.
+ false, flags | PARENT,
+ 0, // Parents have no start flags.
+ 0, // Parents have no end flags.
+ out);
+
+ // If there's an odd child left over, it becomes an output.
+ if (num_chaining_values > 2 * parents_array_len) {
+ memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
+ &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN],
+ BLAKE3_OUT_LEN);
+ return parents_array_len + 1;
+ } else {
+ return parents_array_len;
+ }
+}
+
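
The parent layer follows the same counting pattern; a hypothetical model of its return value:

// Hypothetical: pairs of child CVs compress into parents, and an odd child
// passes through unchanged, so the output count is a rounded-up halving.
static size_t parent_output_count_sketch(size_t num_chaining_values) {
  return num_chaining_values / 2 + num_chaining_values % 2;
}
// e.g. parent_output_count_sketch(5) == 3: two SIMD-hashed parents plus one
// carried-over child CV.
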
+// The wide helper function writes out an array of chaining values and
+// returns the length of that array. The number of chaining values returned
+// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE, or
+// fewer if the input is shorter than that many chunks. The reason for
+// maintaining a wide array of chaining values going back up the tree is to
+// allow the implementation to hash as many parents in parallel as possible.
+//
+// As a special case when the SIMD degree is 1, this function will still return
+// at least 2 outputs. This guarantees that this function doesn't perform the
+// root compression. (If it did, it would use the wrong flags, and also we
+// wouldn't be able to implement extendable output.) Note that this function is
+// not used when the whole input is only 1 chunk long; that's a different
+// codepath.
+//
+// Why not just have the caller split the input on the first update(), instead
+// of implementing this special rule? Because we don't want to limit SIMD or
+// multi-threading parallelism for that update().
+size_t blake3_compress_subtree_wide(const uint8_t *input, size_t input_len,
+ const uint32_t key[8],
+ uint64_t chunk_counter, uint8_t flags,
+ uint8_t *out) {
+ // Note that the single chunk case does *not* bump the SIMD degree up to 2
+ // when it is 1. If this implementation adds multi-threading in the future,
+ // this gives us the option of multi-threading even the 2-chunk case, which
+ // can help performance on smaller platforms.
+ if (input_len <= blake3_simd_degree() * BLAKE3_CHUNK_LEN) {
+ return compress_chunks_parallel(input, input_len, key, chunk_counter, flags,
+ out);
+ }
+
+ // With more than simd_degree chunks, we need to recurse. Start by dividing
+ // the input into left and right subtrees. (Note that this is only optimal
+ // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
+ // of 3 or something, we'll need a more complicated strategy.)
+ size_t left_input_len = left_len(input_len);
+ size_t right_input_len = input_len - left_input_len;
+ const uint8_t *right_input = &input[left_input_len];
+ uint64_t right_chunk_counter =
+ chunk_counter + (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);
+
+ // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
+ // account for the special case of returning 2 outputs when the SIMD degree
+ // is 1.
+ uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
+ size_t degree = blake3_simd_degree();
+ if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
+ // The special case: We always use a degree of at least two, to make
+ // sure there are two outputs. Except, as noted above, at the chunk
+ // level, where we allow degree=1. (Note that the 1-chunk-input case is
+ // a different codepath.)
+ degree = 2;
+ }
+ uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];
+
+ // Recurse! If this implementation adds multi-threading support in the
+ // future, this is where it will go.
+ size_t left_n = blake3_compress_subtree_wide(input, left_input_len, key,
+ chunk_counter, flags, cv_array);
+ size_t right_n = blake3_compress_subtree_wide(
+ right_input, right_input_len, key, right_chunk_counter, flags, right_cvs);
+
+ // The special case again. If simd_degree=1, then we'll have left_n=1 and
+ // right_n=1. Rather than compressing them into a single output, return
+ // them directly, to make sure we always have at least two outputs.
+ if (left_n == 1) {
+ memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
+ return 2;
+ }
+
+ // Otherwise, do one layer of parent node compression.
+ size_t num_chaining_values = left_n + right_n;
+ return compress_parents_parallel(cv_array, num_chaining_values, key, flags,
+ out);
+}
+
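
To see how the recursion interacts with the degree-1 special case, here is a hypothetical model of this function's return value, counted in whole chunks (the real function works in bytes and handles a partial final chunk):

// Hypothetical model: the number of CVs blake3_compress_subtree_wide()
// writes for `chunks` whole chunks at a given SIMD degree.
static size_t wide_output_count_sketch(size_t chunks, size_t degree) {
  if (chunks <= degree) {
    return chunks; // one CV per chunk, all hashed in parallel
  }
  size_t left = 1; // largest power of 2 strictly less than `chunks`
  while (left * 2 < chunks) {
    left *= 2;
  }
  size_t children = wide_output_count_sketch(left, degree) +
                    wide_output_count_sketch(chunks - left, degree);
  if (children == 2) {
    return 2; // the degree=1 special case: never merge down to 1 CV here
  }
  return children / 2 + children % 2; // one layer of parent compression
}
// e.g. wide_output_count_sketch(6, 4) == 3: the left 4 chunks give 4 CVs,
// the right 2 chunks give 2 CVs, and one parent layer condenses 6 into 3.
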
+// Hash a subtree with compress_subtree_wide(), and then condense the resulting
+// list of chaining values down to a single parent node. Don't compress that
+// last parent node, however. Instead, return its message bytes (the
+// concatenated chaining values of its children). This is necessary when the
+// first call to update() supplies a complete subtree, because the topmost
+// parent node of that subtree could end up being the root. It's also necessary
+// for extended output in the general case.
+//
+// As with compress_subtree_wide(), this function is not used on inputs of 1
+// chunk or less. That's a different codepath.
+INLINE void compress_subtree_to_parent_node(
+ const uint8_t *input, size_t input_len, const uint32_t key[8],
+ uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN]) {
+#if defined(BLAKE3_TESTING)
+ assert(input_len > BLAKE3_CHUNK_LEN);
+#endif
+
+ uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
+ size_t num_cvs = blake3_compress_subtree_wide(input, input_len, key,
+ chunk_counter, flags, cv_array);
+
+ // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
+ // compress_subtree_wide() returns more than 2 chaining values. Condense
+ // them into 2 by forming parent nodes repeatedly.
+ uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
+ while (num_cvs > 2) {
+ num_cvs =
+ compress_parents_parallel(cv_array, num_cvs, key, flags, out_array);
+ memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
+ }
+ memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
+}
+
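
The condensation loop above roughly halves the CV count on each pass; a hypothetical trace of its arithmetic:

// Hypothetical: the sequence of num_cvs values seen by the while loop, for
// MAX_SIMD_DEGREE == 16 and a long input: 16 -> 8 -> 4 -> 2. The final pair
// is returned as parent-node message bytes, because only the caller knows
// whether that parent is the root (and whether ROOT flags apply).
static size_t condense_to_two_sketch(size_t num_cvs) {
  while (num_cvs > 2) {
    num_cvs = num_cvs / 2 + num_cvs % 2;
  }
  return num_cvs;
}
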
INLINE void hasher_init_base(blake3_hasher *self, const uint32_t key[8],
uint8_t flags) {
memcpy(self->key, key, BLAKE3_KEY_LEN);
@@ -176,28 +374,63 @@ void blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
hasher_init_base(self, context_key_words, DERIVE_KEY_MATERIAL);
}
-INLINE bool hasher_needs_merge(const blake3_hasher *self,
- uint64_t total_chunks) {
- return self->cv_stack_len > popcnt(total_chunks);
-}
-
-INLINE void hasher_merge_parent(blake3_hasher *self) {
- size_t parent_block_start =
- (((size_t)self->cv_stack_len) - 2) * BLAKE3_OUT_LEN;
- output_t output = parent_output(&self->cv_stack[parent_block_start],
- self->key, self->chunk.flags);
- output_chaining_value(&output, &self->cv_stack[parent_block_start]);
- self->cv_stack_len -= 1;
+// As described in hasher_push_cv() below, we do "lazy merging", delaying
+// merges until right before the next CV is about to be added. This is
+// different from the reference implementation. Another difference is that we
+// aren't always merging 1 chunk at a time. Instead, each CV might represent
+// any power-of-two number of chunks, as long as the smaller-above-larger stack
+// order is maintained. Instead of the "count the trailing 0-bits" algorithm
+// described in the spec, we use a "count the total number of 1-bits" variant
+// that doesn't require us to retain the subtree size of the CV on top of the
+// stack. The principle is the same: each CV that should remain in the stack is
+// represented by a 1-bit in the total number of chunks (or bytes) so far.
+INLINE void hasher_merge_cv_stack(blake3_hasher *self, uint64_t total_len) {
+ size_t post_merge_stack_len = (size_t)popcnt(total_len);
+ while (self->cv_stack_len > post_merge_stack_len) {
+ uint8_t *parent_node =
+ &self->cv_stack[(self->cv_stack_len - 2) * BLAKE3_OUT_LEN];
+ output_t output = parent_output(parent_node, self->key, self->chunk.flags);
+ output_chaining_value(&output, parent_node);
+ self->cv_stack_len -= 1;
+ }
}
-INLINE void hasher_push_chunk_cv(blake3_hasher *self,
- uint8_t cv[BLAKE3_OUT_LEN],
- uint64_t chunk_counter) {
- assert(self->cv_stack_len < BLAKE3_MAX_DEPTH);
- while (hasher_needs_merge(self, chunk_counter)) {
- hasher_merge_parent(self);
- }
- memcpy(&self->cv_stack[self->cv_stack_len * BLAKE3_OUT_LEN], cv,
+// In reference_impl.rs, we merge the new CV with existing CVs from the stack
+// before pushing it. We can do that because we know more input is coming, so
+// we know none of the merges are root.
+//
+// This setting is different. We want to feed as much input as possible to
+// compress_subtree_wide(), without setting aside anything for the chunk_state.
+// If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
+// as a single subtree, if at all possible.
+//
+// This leads to two problems:
+// 1) This 64 KiB input might be the only call that ever gets made to update.
+// In this case, the root node of the 64 KiB subtree would be the root node
+// of the whole tree, and it would need to be ROOT finalized. We can't
+// compress it until we know.
+// 2) This 64 KiB input might complete a larger tree, whose root node is
+//    similarly going to be the root of the whole tree. For example, maybe
+//    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
+// node at the root of the 256 KiB subtree until we know how to finalize it.
+//
+// The second problem is solved with "lazy merging". That is, when we're about
+// to add a CV to the stack, we don't merge it with anything first, as the
+// reference impl does. Instead we do merges using the *previous* CV that was
+// added, which is sitting on top of the stack, and we put the new CV
+// (unmerged) on top of the stack afterwards. This guarantees that we never
+// merge the root node until finalize().
+//
+// Solving the first problem requires an additional tool,
+// compress_subtree_to_parent_node(). That function always returns the top
+// *two* chaining values of the subtree it's compressing. We then do lazy
+// merging with each of them separately, so that the second CV will always
+// remain unmerged. (compress_subtree_to_parent_node() also helps us support
+// extendable output when we're hashing an input all-at-once.)
+INLINE void hasher_push_cv(blake3_hasher *self, uint8_t new_cv[BLAKE3_OUT_LEN],
+ uint64_t chunk_counter) {
+ hasher_merge_cv_stack(self, chunk_counter);
+ memcpy(&self->cv_stack[self->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
BLAKE3_OUT_LEN);
self->cv_stack_len += 1;
}
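
A hypothetical trace of two 64 KiB (64-chunk) updates shows how lazy merging keeps the potential root unmerged:

// update #1: compress_subtree_to_parent_node() yields CVs A and B.
//   push A (counter 0):  merge to popcnt(0) = 0 entries, stack = [A]
//   push B (counter 32): merge to popcnt(32) = 1 entry,  stack = [A, B]
// update #2: another pair, C and D.
//   push C (counter 64): merge to popcnt(64) = 1 entry, so A and B first
//     merge into a 64-chunk parent P; stack = [P, C]
//   push D (counter 96): merge to popcnt(96) = 2 entries, stack = [P, C, D]
// The CV on top is always unmerged, so finalize() can still apply ROOT.
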
@@ -214,11 +447,9 @@ void blake3_hasher_update(blake3_hasher *self, const void *input,
const uint8_t *input_bytes = (const uint8_t *)input;
- // If we already have a partial chunk, or if this is the very first chunk
- // (and it could be the root), we need to add bytes to the chunk state.
- bool is_first_chunk = self->chunk.chunk_counter == 0;
- bool maybe_root = is_first_chunk && input_len == BLAKE3_CHUNK_LEN;
- if (maybe_root || chunk_state_len(&self->chunk) > 0) {
+ // If we have some partial chunk bytes in the internal chunk_state, we need
+ // to finish that chunk first.
+ if (chunk_state_len(&self->chunk) > 0) {
size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&self->chunk);
if (take > input_len) {
take = input_len;
@@ -232,37 +463,67 @@ void blake3_hasher_update(blake3_hasher *self, const void *input,
output_t output = chunk_state_output(&self->chunk);
uint8_t chunk_cv[32];
output_chaining_value(&output, chunk_cv);
- hasher_push_chunk_cv(self, chunk_cv, self->chunk.chunk_counter);
+ hasher_push_cv(self, chunk_cv, self->chunk.chunk_counter);
chunk_state_reset(&self->chunk, self->key, self->chunk.chunk_counter + 1);
} else {
return;
}
}
- // Hash as many whole chunks as we can, without buffering anything. At this
- // point we know none of them can be the root.
- uint8_t out[BLAKE3_OUT_LEN * BLAKE3_MAX_SIMD_DEGREE];
- const uint8_t *chunks[BLAKE3_MAX_SIMD_DEGREE];
- size_t num_chunks = 0;
- while (input_len >= BLAKE3_CHUNK_LEN) {
- while (input_len >= BLAKE3_CHUNK_LEN &&
- num_chunks < BLAKE3_MAX_SIMD_DEGREE) {
- chunks[num_chunks] = input_bytes;
- input_bytes += BLAKE3_CHUNK_LEN;
- input_len -= BLAKE3_CHUNK_LEN;
- num_chunks += 1;
+ // Now the chunk_state is clear, and we have more input. If there's more than
+ // a single chunk (so, definitely not the root chunk), hash the largest whole
+ // subtree we can, with the full benefits of SIMD and multi-threading
+ // parallelism. Two restrictions:
+ // - The subtree has to be a power-of-2 number of chunks. Only subtrees along
+ // the right edge can be incomplete, and we don't know where the right edge
+ // is going to be until we get to finalize().
+ // - The subtree must evenly divide the total number of chunks up until this
+ // point (if total is not 0). If the current incomplete subtree is only
+ // waiting for 1 more chunk, we can't hash a subtree of 4 chunks. We have
+ // to complete the current subtree first.
+ // Because we might need to break up the input to form powers of 2, or to
+ // evenly divide what we already have, this part runs in a loop.
+ while (input_len > BLAKE3_CHUNK_LEN) {
+ size_t subtree_len = (size_t)round_down_to_power_of_2((uint64_t)input_len);
+ uint64_t count_so_far = self->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
+    // Shrink the subtree_len until *half of it* evenly divides the count so
+ // far. Why half? Because compress_subtree_to_parent_node will return a
+ // pair of chaining values, each representing half of the input. As long as
+ // those evenly divide the input so far, we're good. We know that
+ // subtree_len itself is a power of 2, so we can use a bitmasking trick
+ // instead of an actual remainder operation. (Note that if the caller
+ // consistently passes power-of-2 inputs of the same size, as is hopefully
+ // typical, this loop condition will always fail, and subtree_len will
+ // always be the full length of the input.)
+ while ((((uint64_t)((subtree_len / 2) - 1)) & count_so_far) != 0) {
+ subtree_len /= 2;
}
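    // Worked example with hypothetical numbers: subtree_len is a power of 2,
    // so ((subtree_len / 2) - 1) & count_so_far is zero exactly when
    // count_so_far is a multiple of subtree_len / 2. With 3 chunks (3072
    // bytes) hashed so far and 8 chunks of new input: 8192 fails
    // (4095 & 3072 != 0), 4096 fails (2047 & 3072 != 0), and 2048 passes
    // (1023 & 3072 == 0), so we hash a 2-chunk subtree whose two 1-chunk
    // CVs evenly divide the count so far.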
- blake3_hash_many(chunks, num_chunks, BLAKE3_CHUNK_LEN / BLAKE3_BLOCK_LEN,
- self->key, self->chunk.chunk_counter, true,
- self->chunk.flags, CHUNK_START, CHUNK_END, out);
- for (size_t chunk_index = 0; chunk_index < num_chunks; chunk_index++) {
- // The chunk state is empty here, but it stores the counter of the next
- // chunk hash we need to push. Use that counter, and then move it forward.
- hasher_push_chunk_cv(self, &out[chunk_index * BLAKE3_OUT_LEN],
- self->chunk.chunk_counter);
- self->chunk.chunk_counter += 1;
+ // The shrunken subtree_len might now be 1 chunk long. If so, hash that one
+ // chunk by itself. Otherwise, compress the subtree into a pair of CVs.
+ uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
+ if (subtree_len <= BLAKE3_CHUNK_LEN) {
+ blake3_chunk_state chunk_state;
+ chunk_state_init(&chunk_state, self->key, self->chunk.flags);
+ chunk_state.chunk_counter = self->chunk.chunk_counter;
+ chunk_state_update(&chunk_state, input_bytes, subtree_len);
+ output_t output = chunk_state_output(&chunk_state);
+ uint8_t cv[BLAKE3_OUT_LEN];
+ output_chaining_value(&output, cv);
+ hasher_push_cv(self, cv, chunk_state.chunk_counter);
+ } else {
+ // This is the high-performance happy path, though getting here depends
+ // on the caller giving us a long enough input.
+ uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
+ compress_subtree_to_parent_node(input_bytes, subtree_len, self->key,
+ self->chunk.chunk_counter,
+ self->chunk.flags, cv_pair);
+ hasher_push_cv(self, cv_pair, self->chunk.chunk_counter);
+ hasher_push_cv(self, &cv_pair[BLAKE3_OUT_LEN],
+ self->chunk.chunk_counter + (subtree_chunks / 2));
}
- num_chunks = 0;
+ self->chunk.chunk_counter += subtree_chunks;
+ input_bytes = input_bytes + subtree_len;
+ input_len -= subtree_len;
}
// If there's any remaining input less than a full chunk, add it to the chunk
@@ -272,10 +533,8 @@ void blake3_hasher_update(blake3_hasher *self, const void *input,
// here, because hasher_push_cv already does its own merge loop, but it
// simplifies blake3_hasher_finalize below.
if (input_len > 0) {
- while (hasher_needs_merge(self, self->chunk.chunk_counter)) {
- hasher_merge_parent(self);
- }
chunk_state_update(&self->chunk, input_bytes, input_len);
+ hasher_merge_cv_stack(self, self->chunk.chunk_counter);
}
}
diff --git a/c/blake3.h b/c/blake3.h
index c16747d..43e6522 100644
--- a/c/blake3.h
+++ b/c/blake3.h
@@ -23,7 +23,12 @@ typedef struct {
uint32_t key[8];
blake3_chunk_state chunk;
uint8_t cv_stack_len;
- uint8_t cv_stack[BLAKE3_MAX_DEPTH * BLAKE3_OUT_LEN];
+ // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
+ // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
+ // requires a 4th entry, rather than merging everything down to 1, because we
+ // don't know whether more input is coming. This is different from how the
+ // reference implementation does things.
+ uint8_t cv_stack[(BLAKE3_MAX_DEPTH + 1) * BLAKE3_OUT_LEN];
} blake3_hasher;
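// A short illustration of the "+ 1" bound (hypothetical numbers): right
// after a push, occupancy is popcnt(chunk_counter) + 1, at most
// BLAKE3_MAX_DEPTH + 1. With 7 chunks hashed, the stack holds 3 CVs
// (subtrees of 4, 2, and 1 chunks); pushing the 8th chunk's CV makes 4
// entries, since merging down to 1 must wait for more input or finalize.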
void blake3_hasher_init(blake3_hasher *self);
diff --git a/c/blake3_c_rust_bindings/src/test.rs b/c/blake3_c_rust_bindings/src/test.rs
index abfab42..e2849c7 100644
--- a/c/blake3_c_rust_bindings/src/test.rs
+++ b/c/blake3_c_rust_bindings/src/test.rs
@@ -226,7 +226,6 @@ pub fn test_hash_many_fn(hash_many_fn: HashManyFn) {
);
}
for n in 0..NUM_INPUTS {
- #[cfg(feature = "std")]
dbg!(n);
assert_eq!(
&portable_chunks_out[n * OUT_LEN..][..OUT_LEN],
@@ -271,7 +270,6 @@ pub fn test_hash_many_fn(hash_many_fn: HashManyFn) {
);
}
for n in 0..NUM_INPUTS {
- #[cfg(feature = "std")]
dbg!(n);
assert_eq!(
&portable_parents_out[n * OUT_LEN..][..OUT_LEN],
@@ -326,7 +324,6 @@ fn test_compare_reference_impl() {
paint_test_input(&mut input_buf);
for &case in TEST_CASES {
let input = &input_buf[..case];
- #[cfg(feature = "std")]
dbg!(case);
// regular
@@ -393,14 +390,12 @@ fn test_compare_update_multiple() {
paint_test_input(&mut input_buf);
for &first_update in short_test_cases {
- #[cfg(feature = "std")]
dbg!(first_update);
let first_input = &input_buf[..first_update];
let mut test_hasher = crate::Hasher::new();
test_hasher.update(first_input);
for &second_update in short_test_cases {
- #[cfg(feature = "std")]
dbg!(second_update);
let second_input = &input_buf[first_update..][..second_update];
let total_input = &input_buf[..first_update + second_update];
@@ -432,14 +427,12 @@ fn test_fuzz_hasher() {
// Use a fixed RNG seed for reproducibility.
let mut rng = rand_chacha::ChaCha8Rng::from_seed([1; 32]);
for _num_test in 0..num_tests {
- #[cfg(feature = "std")]
dbg!(_num_test);
let mut hasher = crate::Hasher::new();
let mut total_input = 0;
// For each test, write 3 inputs of random length.
for _ in 0..3 {
let input_len = rng.gen_range(0, INPUT_MAX + 1);
- #[cfg(feature = "std")]
dbg!(input_len);
let input = &input_buf[total_input..][..input_len];
hasher.update(input);
diff --git a/c/blake3_dispatch.c b/c/blake3_dispatch.c
index 782139a..27a4d77 100644
--- a/c/blake3_dispatch.c
+++ b/c/blake3_dispatch.c
@@ -2,16 +2,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "blake3.h"
-
-#if defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || \
- defined(_M_X64)
-#define IS_X86
-#endif
-
-#if defined(__arm__)
-#define IS_ARM
-#endif
+#include "blake3_impl.h"
#if defined(IS_X86)
#if defined(_MSC_VER)
@@ -82,7 +73,7 @@ void blake3_hash_many_avx512(const uint8_t *const *inputs, size_t num_inputs,
#endif
#endif
-#if defined(IS_ARM) && !defined(BLAKE3_NO_NEON)
+#if defined(IS_ARM) && defined(BLAKE3_USE_NEON)
void blake3_hash_many_neon(const uint8_t *const *inputs, size_t num_inputs,
size_t blocks, const uint32_t key[8],
uint64_t counter, bool increment_counter,
@@ -288,3 +279,29 @@ void blake3_hash_many(const uint8_t *const *inputs, size_t num_inputs,
increment_counter, flags, flags_start, flags_end,
out);
}
+
+// The dynamically detected SIMD degree of the current platform.
+size_t blake3_simd_degree(void) {
+ const enum cpu_feature features = get_cpu_features();
+#if defined(IS_X86)
+#if !defined(BLAKE3_NO_AVX512)
+ if (features & AVX512F) {
+ return 16;
+ }
+#endif
+#if !defined(BLAKE3_NO_AVX2)
+ if (features & AVX2) {
+ return 8;
+ }
+#endif
+#if !defined(BLAKE3_NO_SSE41)
+ if (features & SSE41) {
+ return 4;
+ }
+#endif
+#endif
+#if defined(BLAKE3_USE_NEON)
+ return 4;
+#endif
+ return 1;
+}
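
As a usage sketch (hypothetical caller code, not part of this commit), the detected degree sizes the batches the wide compression path operates on:

// Hypothetical caller: derive a batch size from the detected degree. This
// mirrors how blake3_compress_subtree_wide() in blake3.c decides whether it
// can hash all chunks in a single blake3_hash_many() call or must recurse.
size_t simd_batch_bytes = blake3_simd_degree() * BLAKE3_CHUNK_LEN;
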
diff --git a/c/blake3_impl.h b/c/blake3_impl.h
index aef6fd7..20ef83d 100644
--- a/c/blake3_impl.h
+++ b/c/blake3_impl.h
@@ -29,6 +29,33 @@
#define INLINE __attribute__((always_inline)) static inline
#endif
+#if defined(__x86_64__) || defined(__i386__) || defined(_M_IX86) || \
+ defined(_M_X64)
+#define IS_X86
+#endif
+
+#if defined(__arm__)
+#define IS_ARM
+#endif
+
+#if defined(IS_X86)
+#define MAX_SIMD_DEGREE 16
+#elif defined(BLAKE3_USE_NEON)
+#define MAX_SIMD_DEGREE 4
+#else
+#define MAX_SIMD_DEGREE 1
+#endif
+
+// There are some places where we want a static size that's equal to the
+// MAX_SIMD_DEGREE, but also at least 2.
+#if defined(IS_X86)
+#define MAX_SIMD_DEGREE_OR_2 16
+#elif defined(BLAKE3_USE_NEON)
+#define MAX_SIMD_DEGREE_OR_2 4
+#else
+#define MAX_SIMD_DEGREE_OR_2 2
+#endif
+
static const uint32_t IV[8] = {0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL,
0xA54FF53AUL, 0x510E527FUL, 0x9B05688CUL,
0x1F83D9ABUL, 0x5BE0CD19UL};
@@ -108,3 +135,5 @@ void blake3_hash_many(const uint8_t *const *inputs, size_t num_inputs,
size_t blocks, const uint32_t key[8], uint64_t counter,
bool increment_counter, uint8_t flags,
uint8_t flags_start, uint8_t flags_end, uint8_t *out);
+
+size_t blake3_simd_degree(void);
diff --git a/c/main.c b/c/main.c
index 8638303..9ef7bce 100644
--- a/c/main.c
+++ b/c/main.c
@@ -157,5 +157,6 @@ int main(int argc, char **argv) {
free(out);
feature = (feature - mask) & mask;
} while (feature != 0);
+ free(buf);
return 0;
}