aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorIvan Boldyrev <[email protected]>2023-09-21 20:24:04 +0400
committerJack O'Connor <[email protected]>2025-03-16 21:24:06 -0700
commit2f863422ec7c4c16475a7b58c0f14674db1ab8a5 (patch)
tree597337c113ccc472a34c63e6ae5a4062a2da07c2
parent268868bd1690aa5a5e13349670c559342807c542 (diff)
Make some functions safe
Certain functions' unsafety comes only from v128 loads and stores. Where the argument types guarantee that these loads and stores are in bounds, the function is declared safe, and the remaining internal unsafe blocks are annotated with safety comments.
-rw-r--r--src/platform.rs10
-rw-r--r--src/wasm32_simd.rs35
2 files changed, 25 insertions, 20 deletions
diff --git a/src/platform.rs b/src/platform.rs
index 1c82603..51b3b7b 100644
--- a/src/platform.rs
+++ b/src/platform.rs
@@ -149,11 +149,10 @@ impl Platform {
// No NEON compress_in_place() implementation yet.
#[cfg(blake3_neon)]
Platform::NEON => portable::compress_in_place(cv, block, block_len, counter, flags),
- // Safe because is compiled for wasm32
#[cfg(blake3_wasm32_simd)]
- Platform::WASM32_SIMD => unsafe {
+ Platform::WASM32_SIMD => {
crate::wasm32_simd::compress_in_place(cv, block, block_len, counter, flags)
- },
+ }
}
}
@@ -187,10 +186,9 @@ impl Platform {
#[cfg(blake3_neon)]
Platform::NEON => portable::compress_xof(cv, block, block_len, counter, flags),
#[cfg(blake3_wasm32_simd)]
- // TODO Safe because compiled for wasm32
- Platform::WASM32_SIMD => unsafe {
+ Platform::WASM32_SIMD => {
crate::wasm32_simd::compress_xof(cv, block, block_len, counter, flags)
- },
+ }
}
}
diff --git a/src/wasm32_simd.rs b/src/wasm32_simd.rs
index 39948e7..1352491 100644
--- a/src/wasm32_simd.rs
+++ b/src/wasm32_simd.rs
@@ -170,15 +170,16 @@ fn undiagonalize(row0: &mut v128, row2: &mut v128, row3: &mut v128) {
}
#[inline(always)]
-unsafe fn compress_pre(
+fn compress_pre(
cv: &CVWords,
block: &[u8; BLOCK_LEN],
block_len: u8,
counter: u64,
flags: u8,
) -> [v128; 4] {
- let row0 = &mut loadu(cv.as_ptr().add(0) as *const u8);
- let row1 = &mut loadu(cv.as_ptr().add(4) as *const u8);
+ // safe because CVWords is [u32; 8]
+ let row0 = &mut unsafe { loadu(cv.as_ptr().add(0) as *const u8) };
+ let row1 = &mut unsafe { loadu(cv.as_ptr().add(4) as *const u8) };
let row2 = &mut set4(IV[0], IV[1], IV[2], IV[3]);
let row3 = &mut set4(
counter_low(counter),
@@ -187,10 +188,11 @@ unsafe fn compress_pre(
flags as u32,
);
- let mut m0 = loadu(block.as_ptr().add(0 * 4 * DEGREE));
- let mut m1 = loadu(block.as_ptr().add(1 * 4 * DEGREE));
- let mut m2 = loadu(block.as_ptr().add(2 * 4 * DEGREE));
- let mut m3 = loadu(block.as_ptr().add(3 * 4 * DEGREE));
+ // safe because block is &[u8; 64]
+ let mut m0 = unsafe { loadu(block.as_ptr().add(0 * 4 * DEGREE)) };
+ let mut m1 = unsafe { loadu(block.as_ptr().add(1 * 4 * DEGREE)) };
+ let mut m2 = unsafe { loadu(block.as_ptr().add(2 * 4 * DEGREE)) };
+ let mut m3 = unsafe { loadu(block.as_ptr().add(3 * 4 * DEGREE)) };
let mut t0;
let mut t1;
@@ -356,7 +358,7 @@ unsafe fn compress_pre(
}
#[target_feature(enable = "simd128")]
-pub unsafe fn compress_in_place(
+pub fn compress_in_place(
cv: &mut CVWords,
block: &[u8; BLOCK_LEN],
block_len: u8,
@@ -365,12 +367,15 @@ pub unsafe fn compress_in_place(
) {
let [row0, row1, row2, row3] = compress_pre(cv, block, block_len, counter, flags);
// it stores in reversed order...
- storeu(xor(row0, row2), cv.as_mut_ptr().add(0) as *mut u8);
- storeu(xor(row1, row3), cv.as_mut_ptr().add(4) as *mut u8);
+ // safe because CVWords is [u32; 8]
+ unsafe {
+ storeu(xor(row0, row2), cv.as_mut_ptr().add(0) as *mut u8);
+ storeu(xor(row1, row3), cv.as_mut_ptr().add(4) as *mut u8);
+ }
}
#[target_feature(enable = "simd128")]
-pub unsafe fn compress_xof(
+pub fn compress_xof(
cv: &CVWords,
block: &[u8; BLOCK_LEN],
block_len: u8,
@@ -381,10 +386,12 @@ pub unsafe fn compress_xof(
compress_pre(cv, block, block_len, counter, flags);
row0 = xor(row0, row2);
row1 = xor(row1, row3);
- row2 = xor(row2, loadu(cv.as_ptr().add(0) as *const u8));
- row3 = xor(row3, loadu(cv.as_ptr().add(4) as *const u8));
+ // safe because CVWords is [u32; 8]
+ row2 = xor(row2, unsafe { loadu(cv.as_ptr().add(0) as *const u8) });
+ row3 = xor(row3, unsafe { loadu(cv.as_ptr().add(4) as *const u8) });
// It seems to be architecture dependent, but works.
- core::mem::transmute([row0, row1, row2, row3])
+ // safe because sizes match, and every state of u8 is valid.
+ unsafe { core::mem::transmute([row0, row1, row2, row3]) }
}
#[inline(always)]