about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  b3sum/src/main.rs       2
-rw-r--r--  benches/bench.rs        3
-rw-r--r--  src/guts.rs             3
-rw-r--r--  src/lib.rs              14
-rw-r--r--  test_vectors/src/lib.rs 4
5 files changed, 12 insertions, 14 deletions
diff --git a/b3sum/src/main.rs b/b3sum/src/main.rs
index 3810bfe..2a9b737 100644
--- a/b3sum/src/main.rs
+++ b/b3sum/src/main.rs
@@ -308,7 +308,7 @@ fn maybe_memmap_file(file: &File) -> Result<Option<memmap::Mmap>> {
fn write_hex_output(mut output: blake3::OutputReader, args: &Args) -> Result<()> {
// Encoding multiples of the block size is most efficient.
let mut len = args.len()?;
- let mut block = [0; blake3::BLOCK_LEN];
+ let mut block = [0; blake3::guts::BLOCK_LEN];
while len > 0 {
output.fill(&mut block);
let hex_str = hex::encode(&block[..]);
diff --git a/benches/bench.rs b/benches/bench.rs
index 90dbc42..9038a94 100644
--- a/benches/bench.rs
+++ b/benches/bench.rs
@@ -4,8 +4,9 @@ extern crate test;
use arrayref::array_ref;
use arrayvec::ArrayVec;
+use blake3::guts::{BLOCK_LEN, CHUNK_LEN};
use blake3::platform::{Platform, MAX_SIMD_DEGREE};
-use blake3::{BLOCK_LEN, CHUNK_LEN, OUT_LEN};
+use blake3::OUT_LEN;
use rand::prelude::*;
use test::Bencher;
diff --git a/src/guts.rs b/src/guts.rs
index e06721c..ecde326 100644
--- a/src/guts.rs
+++ b/src/guts.rs
@@ -6,6 +6,9 @@
//! We could stabilize something like this module in the future. If you have a
//! use case for it, please let us know by filing a GitHub issue.
+pub const BLOCK_LEN: usize = 64;
+pub const CHUNK_LEN: usize = 1024;
+
#[derive(Clone, Debug)]
pub struct ChunkState(crate::ChunkState);
diff --git a/src/lib.rs b/src/lib.rs
index 6ac32a1..cff55d2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -79,7 +79,7 @@ mod test;
#[doc(hidden)]
pub mod guts;
-// The platform module is pub for benchmarks only. It is not stable.
+/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
pub mod platform;
@@ -128,14 +128,8 @@ pub const OUT_LEN: usize = 32;
/// The number of bytes in a key, 32.
pub const KEY_LEN: usize = 32;
-// These constants are pub for incremental use cases like `bao`, as well as
-// tests and benchmarks. Most callers should not need them.
-#[doc(hidden)]
-pub const BLOCK_LEN: usize = 64;
-#[doc(hidden)]
-pub const CHUNK_LEN: usize = 1024;
-#[doc(hidden)]
-pub const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
+const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
+use guts::{BLOCK_LEN, CHUNK_LEN};
// While iterating the compression function within a chunk, the CV is
// represented as words, to avoid doing two extra endianness conversions for
@@ -540,7 +534,7 @@ impl fmt::Debug for ChunkState {
// use full-width SIMD vectors for parent hashing. Without parallel parent
// hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
-// pub for benchmarks
+/// Undocumented and unstable, for benchmarks only.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub enum IncrementCounter {
diff --git a/test_vectors/src/lib.rs b/test_vectors/src/lib.rs
index 129bd16..ec769d1 100644
--- a/test_vectors/src/lib.rs
+++ b/test_vectors/src/lib.rs
@@ -1,9 +1,9 @@
-use blake3::{BLOCK_LEN, CHUNK_LEN};
+use blake3::guts::{BLOCK_LEN, CHUNK_LEN};
use serde::{Deserialize, Serialize};
// A non-multiple of 4 is important, since one possible bug is to fail to emit
// partial words.
-pub const OUTPUT_LEN: usize = 2 * blake3::BLOCK_LEN + 3;
+pub const OUTPUT_LEN: usize = 2 * BLOCK_LEN + 3;
pub const TEST_CASES: &[usize] = &[
0,