diff --git a/Cargo.lock b/Cargo.lock index d51b56b59..5a9dc41ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -940,6 +940,7 @@ dependencies = [ "libcrux", "libcrux-ecdh", "libcrux-hacl", + "libcrux-hacl-rs-krml", "libcrux-hkdf", "libcrux-hmac", "libcrux-kem", diff --git a/Cargo.toml b/Cargo.toml index 835db1f78..ae8f8bfc6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,23 +1,23 @@ [workspace] members = [ - "sys/hacl", - "sys/libjade", - "sys/platform", - "sys/pqclean", - "sys/lib25519", - "benchmarks", - "fuzz", - "libcrux-ml-kem", - "libcrux-sha3", - "libcrux-ml-dsa", - "libcrux-intrinsics", - "libcrux-kem", - "libcrux-hmac", - "libcrux-hkdf", - "libcrux-ecdh", - "libcrux-psq", - "cavp", + "sys/hacl", + "sys/libjade", + "sys/platform", + "sys/pqclean", + "sys/lib25519", + "benchmarks", + "fuzz", + "libcrux-ml-kem", + "libcrux-sha3", + "libcrux-ml-dsa", + "libcrux-intrinsics", + "libcrux-kem", + "libcrux-hmac", + "libcrux-hkdf", + "libcrux-ecdh", + "libcrux-psq", "libcrux-hacl-rs-krml", + "cavp", ] [workspace.package] @@ -44,15 +44,15 @@ readme.workspace = true documentation = "https://docs.rs/libcrux/" description = "The Formally Verified Cryptography Library" exclude = [ - "/tests", - "/specs", - "/proofs", - "/*.py", - "/wasm-demo", - "/fuzz", - "/git-hooks", - "/architecture", - "/libcrux.fst.config.json", + "/tests", + "/specs", + "/proofs", + "/*.py", + "/wasm-demo", + "/fuzz", + "/git-hooks", + "/architecture", + "/libcrux.fst.config.json", ] [lib] @@ -63,6 +63,7 @@ bench = false # so libtest doesn't eat the argumen libcrux-platform = { version = "=0.0.2-beta.2", path = "sys/platform" } [dependencies] +krml = { package = "libcrux-hacl-rs-krml", path = "libcrux-hacl-rs-krml" } libcrux-hacl = { version = "=0.0.2-beta.2", path = "sys/hacl" } libcrux-platform = { version = "=0.0.2-beta.2", path = "sys/platform" } libcrux-hkdf = { version = "=0.0.2-beta.2", path = "libcrux-hkdf" } @@ -113,11 +114,11 @@ panic = "abort" [lints.rust] unexpected_cfgs = { level = "warn", 
check-cfg = [ - 'cfg(hax)', - 'cfg(eurydice)', - 'cfg(doc_cfg)', - 'cfg(libjade)', - 'cfg(simd128)', - 'cfg(simd256)', - 'cfg(aes_ni)', + 'cfg(hax)', + 'cfg(eurydice)', + 'cfg(doc_cfg)', + 'cfg(libjade)', + 'cfg(simd128)', + 'cfg(simd256)', + 'cfg(aes_ni)', ] } diff --git a/src/digest.rs b/src/digest.rs index 3f8dc4a4e..f40702a2f 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -291,10 +291,52 @@ macro_rules! impl_streaming { }; } impl_streaming!(Sha2_224, Sha224, Sha2_224Digest); -impl_streaming!(Sha2_256, Sha256, Sha2_256Digest); impl_streaming!(Sha2_384, Sha384, Sha2_384Digest); impl_streaming!(Sha2_512, Sha512, Sha2_512Digest); +// Streaming API - This is the recommended one. +// For implementations based on hacl_rs (over hacl-c) +macro_rules! impl_streaming_hacl_rs { + ($name:ident, $state:ty, $result:ty) => { + #[derive(Clone)] + pub struct $name { + state: $state, + } + impl $name { + /// Initialize a new digest state. + pub fn new() -> Self { + Self { + state: <$state>::new(), + } + } + + /// Add the `payload` to the digest. + pub fn update(&mut self, payload: &[u8]) { + self.state.update(payload); + } + + /// Get the digest. + /// + /// Note that the digest state can be continued to be used, to extend the + /// digest. 
+ pub fn finish(&self, digest: &mut $result) { + self.state.finish(digest) + } + } + + impl Default for $name { + fn default() -> Self { + Self::new() + } + } + }; +} + +impl_streaming_hacl_rs!( + Sha2_256, + crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256, + Sha2_256Digest +); // SHAKE messages from SHA 3 #[cfg(simd256)] diff --git a/src/hacl_rs/fstar.rs b/src/hacl_rs/fstar.rs new file mode 100644 index 000000000..e15b79144 --- /dev/null +++ b/src/hacl_rs/fstar.rs @@ -0,0 +1,5 @@ +pub mod uint128; +pub mod uint16; +pub mod uint32; +pub mod uint64; +pub mod uint8; diff --git a/src/hacl_rs/fstar/uint128.rs b/src/hacl_rs/fstar/uint128.rs new file mode 100644 index 000000000..1c8b2446c --- /dev/null +++ b/src/hacl_rs/fstar/uint128.rs @@ -0,0 +1,79 @@ +#![allow(non_camel_case_types)] + +pub type uint128 = u128; + +pub fn add(x: uint128, y: uint128) -> uint128 { + x.wrapping_add(y) +} +pub fn add_mod(x: uint128, y: uint128) -> uint128 { + x.wrapping_add(y) +} +pub fn sub(x: uint128, y: uint128) -> uint128 { + x.wrapping_sub(y) +} +pub fn sub_mod(x: uint128, y: uint128) -> uint128 { + x.wrapping_sub(y) +} +pub fn logand(x: uint128, y: uint128) -> uint128 { + x & y +} +pub fn logxor(x: uint128, y: uint128) -> uint128 { + x ^ y +} +pub fn logor(x: uint128, y: uint128) -> uint128 { + x | y +} +pub fn lognot(x: uint128) -> uint128 { + !x +} +pub fn shift_left(x: uint128, y: u32) -> uint128 { + x.wrapping_shl(y) +} +pub fn shift_right(x: uint128, y: u32) -> uint128 { + x.wrapping_shr(y) +} +pub fn eq(x: uint128, y: uint128) -> bool { + x == y +} +pub fn gt(x: uint128, y: uint128) -> bool { + x > y +} +pub fn lt(x: uint128, y: uint128) -> bool { + x < y +} +pub fn gte(x: uint128, y: uint128) -> bool { + x >= y +} +pub fn lte(x: uint128, y: uint128) -> bool { + x <= y +} +pub fn eq_mask(a: uint128, b: uint128) -> uint128 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u128); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(127); + 
xnx.wrapping_sub(1u128) +} +pub fn gte_mask(a: uint128, b: uint128) -> uint128 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(127); + x_xor_q_.wrapping_sub(1u128) +} +pub fn uint64_to_uint128(x: u64) -> uint128 { + x as u128 +} +pub fn uint128_to_uint64(x: uint128) -> u64 { + x as u64 +} +pub fn mul32(x: u64, y: u32) -> uint128 { + (x as u128) * (y as u128) +} +pub fn mul_wide(x: u64, y: u64) -> uint128 { + (x as u128) * (y as u128) +} diff --git a/src/hacl_rs/fstar/uint16.rs b/src/hacl_rs/fstar/uint16.rs new file mode 100644 index 000000000..138e938f7 --- /dev/null +++ b/src/hacl_rs/fstar/uint16.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u16, b: u16) -> u16 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u16); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(15); + xnx.wrapping_sub(1u16) +} + +pub fn gte_mask(a: u16, b: u16) -> u16 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(15); + x_xor_q_.wrapping_sub(1u16) +} diff --git a/src/hacl_rs/fstar/uint32.rs b/src/hacl_rs/fstar/uint32.rs new file mode 100644 index 000000000..95520a4fd --- /dev/null +++ b/src/hacl_rs/fstar/uint32.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u32, b: u32) -> u32 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u32); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(31); + xnx.wrapping_sub(1u32) +} + +pub fn gte_mask(a: u32, b: u32) -> u32 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(31); + x_xor_q_.wrapping_sub(1u32) +} diff --git 
a/src/hacl_rs/fstar/uint64.rs b/src/hacl_rs/fstar/uint64.rs new file mode 100644 index 000000000..0c7d45896 --- /dev/null +++ b/src/hacl_rs/fstar/uint64.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u64, b: u64) -> u64 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u64); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(63); + xnx.wrapping_sub(1u64) +} + +pub fn gte_mask(a: u64, b: u64) -> u64 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(63); + x_xor_q_.wrapping_sub(1u64) +} diff --git a/src/hacl_rs/fstar/uint8.rs b/src/hacl_rs/fstar/uint8.rs new file mode 100644 index 000000000..0c80314d8 --- /dev/null +++ b/src/hacl_rs/fstar/uint8.rs @@ -0,0 +1,22 @@ +pub fn eq_mask(a: u8, b: u8) -> u8 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u8); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(7); + xnx.wrapping_sub(1u8) +} + +pub fn gte_mask(a: u8, b: u8) -> u8 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(7); + x_xor_q_.wrapping_sub(1u8) +} + diff --git a/src/hacl_rs/hash_sha1.rs b/src/hacl_rs/hash_sha1.rs new file mode 100644 index 000000000..2c6d4ffdf --- /dev/null +++ b/src/hacl_rs/hash_sha1.rs @@ -0,0 +1,378 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use super::lowstar; + +const _h0: [u32; 5] = [ + 0x67452301u32, + 0xefcdab89u32, + 0x98badcfeu32, + 0x10325476u32, + 0xc3d2e1f0u32, +]; + +pub(crate) fn init(s: &mut [u32]) { + krml::unroll_for!( + 5, + "i", + 0u32, + 1u32, + s[i as usize] = (&crate::hacl_rs::hash_sha1::_h0)[i as usize] + ) +} + +fn update(h: 
&mut [u32], l: &[u8]) { + let ha: u32 = h[0usize]; + let hb: u32 = h[1usize]; + let hc: u32 = h[2usize]; + let hd: u32 = h[3usize]; + let he: u32 = h[4usize]; + let mut _w: [u32; 80] = [0u32; 80usize]; + for i in 0u32..80u32 { + let v: u32 = if i < 16u32 { + let b: (&[u8], &[u8]) = l.split_at(i.wrapping_mul(4u32) as usize); + let u: u32 = lowstar::endianness::load32_be(b.1); + u + } else { + let wmit3: u32 = (&_w)[i.wrapping_sub(3u32) as usize]; + let wmit8: u32 = (&_w)[i.wrapping_sub(8u32) as usize]; + let wmit14: u32 = (&_w)[i.wrapping_sub(14u32) as usize]; + let wmit16: u32 = (&_w)[i.wrapping_sub(16u32) as usize]; + (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))).wrapping_shl(1u32) + | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))).wrapping_shr(31u32) + }; + (&mut _w)[i as usize] = v + } + for i in 0u32..80u32 { + let _a: u32 = h[0usize]; + let _b: u32 = h[1usize]; + let _c: u32 = h[2usize]; + let _d: u32 = h[3usize]; + let _e: u32 = h[4usize]; + let wmit: u32 = (&_w)[i as usize]; + let ite: u32 = if i < 20u32 { + _b & _c ^ !_b & _d + } else if 39u32 < i && i < 60u32 { + _b & _c ^ (_b & _d ^ _c & _d) + } else { + _b ^ (_c ^ _d) + }; + let ite0: u32 = if i < 20u32 { + 0x5a827999u32 + } else if i < 40u32 { + 0x6ed9eba1u32 + } else if i < 60u32 { + 0x8f1bbcdcu32 + } else { + 0xca62c1d6u32 + }; + let _T: u32 = (_a.wrapping_shl(5u32) | _a.wrapping_shr(27u32)) + .wrapping_add(ite) + .wrapping_add(_e) + .wrapping_add(ite0) + .wrapping_add(wmit); + h[0usize] = _T; + h[1usize] = _a; + h[2usize] = _b.wrapping_shl(30u32) | _b.wrapping_shr(2u32); + h[3usize] = _c; + h[4usize] = _d + } + for i in 0u32..80u32 { + (&mut _w)[i as usize] = 0u32 + } + let sta: u32 = h[0usize]; + let stb: u32 = h[1usize]; + let stc: u32 = h[2usize]; + let std: u32 = h[3usize]; + let ste: u32 = h[4usize]; + h[0usize] = sta.wrapping_add(ha); + h[1usize] = stb.wrapping_add(hb); + h[2usize] = stc.wrapping_add(hc); + h[3usize] = std.wrapping_add(hd); + h[4usize] = ste.wrapping_add(he) +} + +fn pad(len: u64, dst: &mut 
[u8]) { + let dst1: (&mut [u8], &mut [u8]) = dst.split_at_mut(0usize); + dst1.1[0usize] = 0x80u8; + let dst2: (&mut [u8], &mut [u8]) = dst1.1.split_at_mut(1usize); + for i in 0u32..128u32 + .wrapping_sub(9u32.wrapping_add(len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32) + { + dst2.1[i as usize] = 0u8 + } + let dst3: (&mut [u8], &mut [u8]) = dst2.1.split_at_mut( + 128u32 + .wrapping_sub(9u32.wrapping_add(len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32) as usize, + ); + lowstar::endianness::store64_be(dst3.1, len.wrapping_shl(3u32)) +} + +pub(crate) fn finish(s: &[u32], dst: &mut [u8]) { + krml::unroll_for!( + 5, + "i", + 0u32, + 1u32, + lowstar::endianness::store32_be( + &mut dst[i.wrapping_mul(4u32) as usize..], + (&s[0usize..])[i as usize] + ) + ) +} + +pub(crate) fn update_multi(s: &mut [u32], blocks: &[u8], n_blocks: u32) { + for i in 0u32..n_blocks { + let sz: u32 = 64u32; + let block: (&[u8], &[u8]) = blocks.split_at(sz.wrapping_mul(i) as usize); + crate::hacl_rs::hash_sha1::update(s, block.1) + } +} + +pub(crate) fn update_last(s: &mut [u32], prev_len: u64, input: &[u8], input_len: u32) { + let blocks_n: u32 = input_len.wrapping_div(64u32); + let blocks_len: u32 = blocks_n.wrapping_mul(64u32); + let blocks: (&[u8], &[u8]) = input.split_at(0usize); + let rest_len: u32 = input_len.wrapping_sub(blocks_len); + let rest: (&[u8], &[u8]) = blocks.1.split_at(blocks_len as usize); + crate::hacl_rs::hash_sha1::update_multi(s, rest.0, blocks_n); + let total_input_len: u64 = prev_len.wrapping_add(input_len as u64); + let pad_len: u32 = 1u32 + .wrapping_add( + 128u32 + .wrapping_sub(9u32.wrapping_add(total_input_len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32), + ) + .wrapping_add(8u32); + let tmp_len: u32 = rest_len.wrapping_add(pad_len); + let mut tmp_twoblocks: [u8; 128] = [0u8; 128usize]; + let tmp: (&mut [u8], &mut [u8]) = tmp_twoblocks.split_at_mut(0usize); + let tmp_rest: (&mut [u8], &mut [u8]) = tmp.1.split_at_mut(0usize); 
+ let tmp_pad: (&mut [u8], &mut [u8]) = tmp_rest.1.split_at_mut(rest_len as usize); + (tmp_pad.0[0usize..rest_len as usize]).copy_from_slice(&rest.1[0usize..rest_len as usize]); + crate::hacl_rs::hash_sha1::pad(total_input_len, tmp_pad.1); + crate::hacl_rs::hash_sha1::update_multi(s, tmp.1, tmp_len.wrapping_div(64u32)) +} + +pub(crate) fn hash_oneshot(output: &mut [u8], input: &[u8], input_len: u32) { + let mut s: [u32; 5] = [ + 0x67452301u32, + 0xefcdab89u32, + 0x98badcfeu32, + 0x10325476u32, + 0xc3d2e1f0u32, + ]; + let blocks_n: u32 = input_len.wrapping_div(64u32); + let blocks_n1: u32 = if input_len.wrapping_rem(64u32) == 0u32 && blocks_n > 0u32 { + blocks_n.wrapping_sub(1u32) + } else { + blocks_n + }; + let blocks_len: u32 = blocks_n1.wrapping_mul(64u32); + let blocks: (&[u8], &[u8]) = input.split_at(0usize); + let rest_len: u32 = input_len.wrapping_sub(blocks_len); + let rest: (&[u8], &[u8]) = blocks.1.split_at(blocks_len as usize); + let blocks_n0: u32 = blocks_n1; + let blocks_len0: u32 = blocks_len; + let blocks0: &[u8] = rest.0; + let rest_len0: u32 = rest_len; + let rest0: &[u8] = rest.1; + crate::hacl_rs::hash_sha1::update_multi(&mut s, blocks0, blocks_n0); + crate::hacl_rs::hash_sha1::update_last(&mut s, blocks_len0 as u64, rest0, rest_len0); + crate::hacl_rs::hash_sha1::finish(&s, output) +} + +pub type state_t = crate::hacl_rs::streaming_types::state_32; + +pub fn malloc() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); + crate::hacl_rs::hash_sha1::init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut 
[u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha1::init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +/** +0 = success, 1 = max length exceeded +*/ +pub fn update0( + state: &mut [crate::hacl_rs::streaming_types::state_32], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + if chunk_len <= 64u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha1::update_multi(block_state, buf, 1u32) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 64u32 + } else { + (chunk_len as u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = 
chunk_len.wrapping_sub(ite).wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha1::update_multi( + block_state, + data2.0, + data1_len.wrapping_div(64u32), + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 64u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(64u32 as u64) == 0u64 && total_len10 > 0u64 + { + 64u32 + } else { + total_len10.wrapping_rem(64u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha1::update_multi(block_state, buf0, 1u32) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 64u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + 
.wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha1::update_multi( + block_state, + data2.0, + data1_len.wrapping_div(64u32), + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +pub fn digest(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 5] = [0u32; 5usize]; + ((&mut tmp_block_state)[0usize..5usize]).copy_from_slice(&block_state[0usize..5usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha1::update_multi(&mut tmp_block_state, buf_last.0, 0u32); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha1::update_last(&mut tmp_block_state, prev_len_last, buf_last.1, r); + crate::hacl_rs::hash_sha1::finish(&tmp_block_state, output) +} + +pub fn copy( + state: &[crate::hacl_rs::streaming_types::state_32], +) -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let 
block_state0: &[u32] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + ((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); + let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); + ((&mut block_state)[0usize..5usize]).copy_from_slice(&block_state0[0usize..5usize]); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn hash(output: &mut [u8], input: &[u8], input_len: u32) { + crate::hacl_rs::hash_sha1::hash_oneshot(output, input, input_len) +} diff --git a/src/hacl_rs/hash_sha2.rs b/src/hacl_rs/hash_sha2.rs new file mode 100644 index 000000000..362deda67 --- /dev/null +++ b/src/hacl_rs/hash_sha2.rs @@ -0,0 +1,1310 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use super::fstar; +use super::lowstar; + +pub(crate) const h224: [u32; 8] = [ + 0xc1059ed8u32, + 0x367cd507u32, + 0x3070dd17u32, + 0xf70e5939u32, + 0xffc00b31u32, + 0x68581511u32, + 0x64f98fa7u32, + 0xbefa4fa4u32, +]; + +pub(crate) const h256: [u32; 8] = [ + 0x6a09e667u32, + 0xbb67ae85u32, + 0x3c6ef372u32, + 0xa54ff53au32, + 0x510e527fu32, + 0x9b05688cu32, + 0x1f83d9abu32, + 0x5be0cd19u32, +]; + +pub(crate) const h384: [u64; 8] = [ + 0xcbbb9d5dc1059ed8u64, + 0x629a292a367cd507u64, + 0x9159015a3070dd17u64, + 0x152fecd8f70e5939u64, + 0x67332667ffc00b31u64, + 0x8eb44a8768581511u64, + 0xdb0c2e0d64f98fa7u64, + 0x47b5481dbefa4fa4u64, +]; + +pub(crate) const h512: [u64; 8] = [ + 0x6a09e667f3bcc908u64, + 0xbb67ae8584caa73bu64, + 0x3c6ef372fe94f82bu64, + 0xa54ff53a5f1d36f1u64, + 0x510e527fade682d1u64, + 0x9b05688c2b3e6c1fu64, + 
0x1f83d9abfb41bd6bu64, + 0x5be0cd19137e2179u64, +]; + +pub(crate) const k224_256: [u32; 64] = [ + 0x428a2f98u32, + 0x71374491u32, + 0xb5c0fbcfu32, + 0xe9b5dba5u32, + 0x3956c25bu32, + 0x59f111f1u32, + 0x923f82a4u32, + 0xab1c5ed5u32, + 0xd807aa98u32, + 0x12835b01u32, + 0x243185beu32, + 0x550c7dc3u32, + 0x72be5d74u32, + 0x80deb1feu32, + 0x9bdc06a7u32, + 0xc19bf174u32, + 0xe49b69c1u32, + 0xefbe4786u32, + 0x0fc19dc6u32, + 0x240ca1ccu32, + 0x2de92c6fu32, + 0x4a7484aau32, + 0x5cb0a9dcu32, + 0x76f988dau32, + 0x983e5152u32, + 0xa831c66du32, + 0xb00327c8u32, + 0xbf597fc7u32, + 0xc6e00bf3u32, + 0xd5a79147u32, + 0x06ca6351u32, + 0x14292967u32, + 0x27b70a85u32, + 0x2e1b2138u32, + 0x4d2c6dfcu32, + 0x53380d13u32, + 0x650a7354u32, + 0x766a0abbu32, + 0x81c2c92eu32, + 0x92722c85u32, + 0xa2bfe8a1u32, + 0xa81a664bu32, + 0xc24b8b70u32, + 0xc76c51a3u32, + 0xd192e819u32, + 0xd6990624u32, + 0xf40e3585u32, + 0x106aa070u32, + 0x19a4c116u32, + 0x1e376c08u32, + 0x2748774cu32, + 0x34b0bcb5u32, + 0x391c0cb3u32, + 0x4ed8aa4au32, + 0x5b9cca4fu32, + 0x682e6ff3u32, + 0x748f82eeu32, + 0x78a5636fu32, + 0x84c87814u32, + 0x8cc70208u32, + 0x90befffau32, + 0xa4506cebu32, + 0xbef9a3f7u32, + 0xc67178f2u32, +]; + +pub(crate) const k384_512: [u64; 80] = [ + 0x428a2f98d728ae22u64, + 0x7137449123ef65cdu64, + 0xb5c0fbcfec4d3b2fu64, + 0xe9b5dba58189dbbcu64, + 0x3956c25bf348b538u64, + 0x59f111f1b605d019u64, + 0x923f82a4af194f9bu64, + 0xab1c5ed5da6d8118u64, + 0xd807aa98a3030242u64, + 0x12835b0145706fbeu64, + 0x243185be4ee4b28cu64, + 0x550c7dc3d5ffb4e2u64, + 0x72be5d74f27b896fu64, + 0x80deb1fe3b1696b1u64, + 0x9bdc06a725c71235u64, + 0xc19bf174cf692694u64, + 0xe49b69c19ef14ad2u64, + 0xefbe4786384f25e3u64, + 0x0fc19dc68b8cd5b5u64, + 0x240ca1cc77ac9c65u64, + 0x2de92c6f592b0275u64, + 0x4a7484aa6ea6e483u64, + 0x5cb0a9dcbd41fbd4u64, + 0x76f988da831153b5u64, + 0x983e5152ee66dfabu64, + 0xa831c66d2db43210u64, + 0xb00327c898fb213fu64, + 0xbf597fc7beef0ee4u64, + 0xc6e00bf33da88fc2u64, + 0xd5a79147930aa725u64, + 
0x06ca6351e003826fu64, + 0x142929670a0e6e70u64, + 0x27b70a8546d22ffcu64, + 0x2e1b21385c26c926u64, + 0x4d2c6dfc5ac42aedu64, + 0x53380d139d95b3dfu64, + 0x650a73548baf63deu64, + 0x766a0abb3c77b2a8u64, + 0x81c2c92e47edaee6u64, + 0x92722c851482353bu64, + 0xa2bfe8a14cf10364u64, + 0xa81a664bbc423001u64, + 0xc24b8b70d0f89791u64, + 0xc76c51a30654be30u64, + 0xd192e819d6ef5218u64, + 0xd69906245565a910u64, + 0xf40e35855771202au64, + 0x106aa07032bbd1b8u64, + 0x19a4c116b8d2d0c8u64, + 0x1e376c085141ab53u64, + 0x2748774cdf8eeb99u64, + 0x34b0bcb5e19b48a8u64, + 0x391c0cb3c5c95a63u64, + 0x4ed8aa4ae3418acbu64, + 0x5b9cca4f7763e373u64, + 0x682e6ff3d6b2b8a3u64, + 0x748f82ee5defb2fcu64, + 0x78a5636f43172f60u64, + 0x84c87814a1f0ab72u64, + 0x8cc702081a6439ecu64, + 0x90befffa23631e28u64, + 0xa4506cebde82bde9u64, + 0xbef9a3f7b2c67915u64, + 0xc67178f2e372532bu64, + 0xca273eceea26619cu64, + 0xd186b8c721c0c207u64, + 0xeada7dd6cde0eb1eu64, + 0xf57d4f7fee6ed178u64, + 0x06f067aa72176fbau64, + 0x0a637dc5a2c898a6u64, + 0x113f9804bef90daeu64, + 0x1b710b35131c471bu64, + 0x28db77f523047d84u64, + 0x32caab7b40c72493u64, + 0x3c9ebe0a15c9bebcu64, + 0x431d67c49c100d4cu64, + 0x4cc5d4becb3e42b6u64, + 0x597f299cfc657e2au64, + 0x5fcb6fab3ad6faecu64, + 0x6c44198c4a475817u64, +]; + +pub(crate) fn sha256_init(hash: &mut [u32]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize]; + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha256_update(b: &[u8], hash: &mut [u32]) { + let mut hash_old: [u32; 8] = [0u32; 8usize]; + let mut ws: [u32; 16] = [0u32; 16usize]; + ((&mut hash_old)[0usize..8usize]).copy_from_slice(&hash[0usize..8usize]); + let b1: &[u8] = b; + let u: u32 = lowstar::endianness::load32_be(&b1[0usize..]); + (&mut ws)[0usize] = u; + let u0: u32 = lowstar::endianness::load32_be(&b1[4usize..]); + (&mut ws)[1usize] = u0; + let u1: u32 = lowstar::endianness::load32_be(&b1[8usize..]); + (&mut 
ws)[2usize] = u1; + let u2: u32 = lowstar::endianness::load32_be(&b1[12usize..]); + (&mut ws)[3usize] = u2; + let u3: u32 = lowstar::endianness::load32_be(&b1[16usize..]); + (&mut ws)[4usize] = u3; + let u4: u32 = lowstar::endianness::load32_be(&b1[20usize..]); + (&mut ws)[5usize] = u4; + let u5: u32 = lowstar::endianness::load32_be(&b1[24usize..]); + (&mut ws)[6usize] = u5; + let u6: u32 = lowstar::endianness::load32_be(&b1[28usize..]); + (&mut ws)[7usize] = u6; + let u7: u32 = lowstar::endianness::load32_be(&b1[32usize..]); + (&mut ws)[8usize] = u7; + let u8: u32 = lowstar::endianness::load32_be(&b1[36usize..]); + (&mut ws)[9usize] = u8; + let u9: u32 = lowstar::endianness::load32_be(&b1[40usize..]); + (&mut ws)[10usize] = u9; + let u10: u32 = lowstar::endianness::load32_be(&b1[44usize..]); + (&mut ws)[11usize] = u10; + let u11: u32 = lowstar::endianness::load32_be(&b1[48usize..]); + (&mut ws)[12usize] = u11; + let u12: u32 = lowstar::endianness::load32_be(&b1[52usize..]); + (&mut ws)[13usize] = u12; + let u13: u32 = lowstar::endianness::load32_be(&b1[56usize..]); + (&mut ws)[14usize] = u13; + let u14: u32 = lowstar::endianness::load32_be(&b1[60usize..]); + (&mut ws)[15usize] = u14; + krml::unroll_for!(4, "i", 0u32, 1u32, { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let k_t: u32 = (&crate::hacl_rs::hash_sha2::k224_256) + [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let ws_t: u32 = (&ws)[i0 as usize]; + let a0: u32 = hash[0usize]; + let b0: u32 = hash[1usize]; + let c0: u32 = hash[2usize]; + let d0: u32 = hash[3usize]; + let e0: u32 = hash[4usize]; + let f0: u32 = hash[5usize]; + let g0: u32 = hash[6usize]; + let h02: u32 = hash[7usize]; + let k_e_t: u32 = k_t; + let t1: u32 = h02 + .wrapping_add( + (e0.wrapping_shl(26u32) | e0.wrapping_shr(6u32)) + ^ ((e0.wrapping_shl(21u32) | e0.wrapping_shr(11u32)) + ^ (e0.wrapping_shl(7u32) | e0.wrapping_shr(25u32))), + ) + .wrapping_add(e0 & f0 ^ !e0 & g0) + .wrapping_add(k_e_t) + .wrapping_add(ws_t); + let t2: u32 
= ((a0.wrapping_shl(30u32) | a0.wrapping_shr(2u32)) + ^ ((a0.wrapping_shl(19u32) | a0.wrapping_shr(13u32)) + ^ (a0.wrapping_shl(10u32) | a0.wrapping_shr(22u32)))) + .wrapping_add(a0 & b0 ^ (a0 & c0 ^ b0 & c0)); + let a1: u32 = t1.wrapping_add(t2); + let b10: u32 = a0; + let c1: u32 = b0; + let d1: u32 = c0; + let e1: u32 = d0.wrapping_add(t1); + let f1: u32 = e0; + let g1: u32 = f0; + let h12: u32 = g0; + hash[0usize] = a1; + hash[1usize] = b10; + hash[2usize] = c1; + hash[3usize] = d1; + hash[4usize] = e1; + hash[5usize] = f1; + hash[6usize] = g1; + hash[7usize] = h12 + }); + if i < 3u32 { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let t16: u32 = (&ws)[i0 as usize]; + let t15: u32 = (&ws)[i0.wrapping_add(1u32).wrapping_rem(16u32) as usize]; + let t7: u32 = (&ws)[i0.wrapping_add(9u32).wrapping_rem(16u32) as usize]; + let t2: u32 = (&ws)[i0.wrapping_add(14u32).wrapping_rem(16u32) as usize]; + let s1: u32 = (t2.wrapping_shl(15u32) | t2.wrapping_shr(17u32)) + ^ ((t2.wrapping_shl(13u32) | t2.wrapping_shr(19u32)) ^ t2.wrapping_shr(10u32)); + let s0: u32 = (t15.wrapping_shl(25u32) | t15.wrapping_shr(7u32)) + ^ ((t15.wrapping_shl(14u32) | t15.wrapping_shr(18u32)) + ^ t15.wrapping_shr(3u32)); + (&mut ws)[i0 as usize] = s1.wrapping_add(t7).wrapping_add(s0).wrapping_add(t16) + }) + } + }); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (hash[i as usize]).wrapping_add((&hash_old)[i as usize]); + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha256_update_nblocks(len: u32, b: &[u8], st: &mut [u32]) { + let blocks: u32 = len.wrapping_div(64u32); + for i in 0u32..blocks { + let b0: &[u8] = b; + let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(64u32) as usize); + crate::hacl_rs::hash_sha2::sha256_update(mb.1, st) + } +} + +pub(crate) fn sha256_update_last(totlen: u64, len: u32, b: &[u8], hash: &mut [u32]) { + let blocks: u32 = if len.wrapping_add(8u32).wrapping_add(1u32) <= 64u32 { + 1u32 + } else { + 
2u32 + }; + let fin: u32 = blocks.wrapping_mul(64u32); + let mut last: [u8; 128] = [0u8; 128usize]; + let mut totlen_buf: [u8; 8] = [0u8; 8usize]; + let total_len_bits: u64 = totlen.wrapping_shl(3u32); + lowstar::endianness::store64_be(&mut totlen_buf, total_len_bits); + let b0: &[u8] = b; + ((&mut last)[0usize..len as usize]).copy_from_slice(&b0[0usize..len as usize]); + (&mut last)[len as usize] = 0x80u8; + ((&mut last)[fin.wrapping_sub(8u32) as usize..fin.wrapping_sub(8u32) as usize + 8usize]) + .copy_from_slice(&(&totlen_buf)[0usize..8usize]); + let last0: (&[u8], &[u8]) = last.split_at(0usize); + let last1: (&[u8], &[u8]) = last0.1.split_at(64usize); + let l0: &[u8] = last1.0; + let l1: &[u8] = last1.1; + let lb0: &[u8] = l0; + let lb1: &[u8] = l1; + let last00: &[u8] = lb0; + let last10: &[u8] = lb1; + crate::hacl_rs::hash_sha2::sha256_update(last00, hash); + if blocks > 1u32 { + crate::hacl_rs::hash_sha2::sha256_update(last10, hash) + } +} + +pub(crate) fn sha256_finish(st: &[u32], h: &mut [u8]) { + let mut hbuf: [u8; 32] = [0u8; 32usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store32_be( + &mut (&mut hbuf)[i.wrapping_mul(4u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..32usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..32usize]) +} + +#[inline] +fn sha224_init(hash: &mut [u32]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (&crate::hacl_rs::hash_sha2::h224)[i as usize]; + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha224_update_nblocks(len: u32, b: &[u8], st: &mut [u32]) { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(len, b, st) +} + +fn sha224_update_last(totlen: u64, len: u32, b: &[u8], st: &mut [u32]) { + crate::hacl_rs::hash_sha2::sha256_update_last(totlen, len, b, st) +} + +#[inline] +fn sha224_finish(st: &[u32], h: &mut [u8]) { + let mut hbuf: [u8; 32] = [0u8; 32usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + 
lowstar::endianness::store32_be( + &mut (&mut hbuf)[i.wrapping_mul(4u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..28usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..28usize]) +} + +pub(crate) fn sha512_init(hash: &mut [u64]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha512_update(b: &[u8], hash: &mut [u64]) { + let mut hash_old: [u64; 8] = [0u64; 8usize]; + let mut ws: [u64; 16] = [0u64; 16usize]; + ((&mut hash_old)[0usize..8usize]).copy_from_slice(&hash[0usize..8usize]); + let b1: &[u8] = b; + let u: u64 = lowstar::endianness::load64_be(&b1[0usize..]); + (&mut ws)[0usize] = u; + let u0: u64 = lowstar::endianness::load64_be(&b1[8usize..]); + (&mut ws)[1usize] = u0; + let u1: u64 = lowstar::endianness::load64_be(&b1[16usize..]); + (&mut ws)[2usize] = u1; + let u2: u64 = lowstar::endianness::load64_be(&b1[24usize..]); + (&mut ws)[3usize] = u2; + let u3: u64 = lowstar::endianness::load64_be(&b1[32usize..]); + (&mut ws)[4usize] = u3; + let u4: u64 = lowstar::endianness::load64_be(&b1[40usize..]); + (&mut ws)[5usize] = u4; + let u5: u64 = lowstar::endianness::load64_be(&b1[48usize..]); + (&mut ws)[6usize] = u5; + let u6: u64 = lowstar::endianness::load64_be(&b1[56usize..]); + (&mut ws)[7usize] = u6; + let u7: u64 = lowstar::endianness::load64_be(&b1[64usize..]); + (&mut ws)[8usize] = u7; + let u8: u64 = lowstar::endianness::load64_be(&b1[72usize..]); + (&mut ws)[9usize] = u8; + let u9: u64 = lowstar::endianness::load64_be(&b1[80usize..]); + (&mut ws)[10usize] = u9; + let u10: u64 = lowstar::endianness::load64_be(&b1[88usize..]); + (&mut ws)[11usize] = u10; + let u11: u64 = lowstar::endianness::load64_be(&b1[96usize..]); + (&mut ws)[12usize] = u11; + let u12: u64 = lowstar::endianness::load64_be(&b1[104usize..]); + (&mut ws)[13usize] = u12; + let u13: u64 = 
lowstar::endianness::load64_be(&b1[112usize..]); + (&mut ws)[14usize] = u13; + let u14: u64 = lowstar::endianness::load64_be(&b1[120usize..]); + (&mut ws)[15usize] = u14; + krml::unroll_for!(5, "i", 0u32, 1u32, { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let k_t: u64 = (&crate::hacl_rs::hash_sha2::k384_512) + [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let ws_t: u64 = (&ws)[i0 as usize]; + let a0: u64 = hash[0usize]; + let b0: u64 = hash[1usize]; + let c0: u64 = hash[2usize]; + let d0: u64 = hash[3usize]; + let e0: u64 = hash[4usize]; + let f0: u64 = hash[5usize]; + let g0: u64 = hash[6usize]; + let h02: u64 = hash[7usize]; + let k_e_t: u64 = k_t; + let t1: u64 = h02 + .wrapping_add( + (e0.wrapping_shl(50u32) | e0.wrapping_shr(14u32)) + ^ ((e0.wrapping_shl(46u32) | e0.wrapping_shr(18u32)) + ^ (e0.wrapping_shl(23u32) | e0.wrapping_shr(41u32))), + ) + .wrapping_add(e0 & f0 ^ !e0 & g0) + .wrapping_add(k_e_t) + .wrapping_add(ws_t); + let t2: u64 = ((a0.wrapping_shl(36u32) | a0.wrapping_shr(28u32)) + ^ ((a0.wrapping_shl(30u32) | a0.wrapping_shr(34u32)) + ^ (a0.wrapping_shl(25u32) | a0.wrapping_shr(39u32)))) + .wrapping_add(a0 & b0 ^ (a0 & c0 ^ b0 & c0)); + let a1: u64 = t1.wrapping_add(t2); + let b10: u64 = a0; + let c1: u64 = b0; + let d1: u64 = c0; + let e1: u64 = d0.wrapping_add(t1); + let f1: u64 = e0; + let g1: u64 = f0; + let h12: u64 = g0; + hash[0usize] = a1; + hash[1usize] = b10; + hash[2usize] = c1; + hash[3usize] = d1; + hash[4usize] = e1; + hash[5usize] = f1; + hash[6usize] = g1; + hash[7usize] = h12 + }); + if i < 4u32 { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let t16: u64 = (&ws)[i0 as usize]; + let t15: u64 = (&ws)[i0.wrapping_add(1u32).wrapping_rem(16u32) as usize]; + let t7: u64 = (&ws)[i0.wrapping_add(9u32).wrapping_rem(16u32) as usize]; + let t2: u64 = (&ws)[i0.wrapping_add(14u32).wrapping_rem(16u32) as usize]; + let s1: u64 = (t2.wrapping_shl(45u32) | t2.wrapping_shr(19u32)) + ^ ((t2.wrapping_shl(3u32) | t2.wrapping_shr(61u32)) ^ 
t2.wrapping_shr(6u32)); + let s0: u64 = (t15.wrapping_shl(63u32) | t15.wrapping_shr(1u32)) + ^ ((t15.wrapping_shl(56u32) | t15.wrapping_shr(8u32)) ^ t15.wrapping_shr(7u32)); + (&mut ws)[i0 as usize] = s1.wrapping_add(t7).wrapping_add(s0).wrapping_add(t16) + }) + } + }); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (hash[i as usize]).wrapping_add((&hash_old)[i as usize]); + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha512_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { + let blocks: u32 = len.wrapping_div(128u32); + for i in 0u32..blocks { + let b0: &[u8] = b; + let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(128u32) as usize); + crate::hacl_rs::hash_sha2::sha512_update(mb.1, st) + } +} + +pub(crate) fn sha512_update_last( + totlen: fstar::uint128::uint128, + len: u32, + b: &[u8], + hash: &mut [u64], +) { + let blocks: u32 = if len.wrapping_add(16u32).wrapping_add(1u32) <= 128u32 { + 1u32 + } else { + 2u32 + }; + let fin: u32 = blocks.wrapping_mul(128u32); + let mut last: [u8; 256] = [0u8; 256usize]; + let mut totlen_buf: [u8; 16] = [0u8; 16usize]; + let total_len_bits: fstar::uint128::uint128 = fstar::uint128::shift_left(totlen, 3u32); + lowstar::endianness::store128_be(&mut totlen_buf, total_len_bits); + let b0: &[u8] = b; + ((&mut last)[0usize..len as usize]).copy_from_slice(&b0[0usize..len as usize]); + (&mut last)[len as usize] = 0x80u8; + ((&mut last)[fin.wrapping_sub(16u32) as usize..fin.wrapping_sub(16u32) as usize + 16usize]) + .copy_from_slice(&(&totlen_buf)[0usize..16usize]); + let last0: (&[u8], &[u8]) = last.split_at(0usize); + let last1: (&[u8], &[u8]) = last0.1.split_at(128usize); + let l0: &[u8] = last1.0; + let l1: &[u8] = last1.1; + let lb0: &[u8] = l0; + let lb1: &[u8] = l1; + let last00: &[u8] = lb0; + let last10: &[u8] = lb1; + crate::hacl_rs::hash_sha2::sha512_update(last00, hash); + if blocks > 1u32 { + crate::hacl_rs::hash_sha2::sha512_update(last10, 
hash) + } +} + +pub(crate) fn sha512_finish(st: &[u64], h: &mut [u8]) { + let mut hbuf: [u8; 64] = [0u8; 64usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_be( + &mut (&mut hbuf)[i.wrapping_mul(8u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..64usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..64usize]) +} + +pub(crate) fn sha384_init(hash: &mut [u64]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize]; + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha384_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(len, b, st) +} + +pub(crate) fn sha384_update_last( + totlen: fstar::uint128::uint128, + len: u32, + b: &[u8], + st: &mut [u64], +) { + crate::hacl_rs::hash_sha2::sha512_update_last(totlen, len, b, st) +} + +pub(crate) fn sha384_finish(st: &[u64], h: &mut [u8]) { + let mut hbuf: [u8; 64] = [0u8; 64usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_be( + &mut (&mut hbuf)[i.wrapping_mul(8u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..48usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..48usize]) +} + +pub type state_t_224 = crate::hacl_rs::streaming_types::state_32; + +pub type state_t_256 = crate::hacl_rs::streaming_types::state_32; + +pub type state_t_384 = crate::hacl_rs::streaming_types::state_64; + +pub type state_t_512 = crate::hacl_rs::streaming_types::state_64; + +/** +Allocate initial state for the SHA2_256 hash. The state is to be freed by +calling `free_256`. 
+*/ +pub fn malloc_256() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha256_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_256`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. +*/ +pub fn copy_256( + state: &[crate::hacl_rs::streaming_types::state_32], +) -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let block_state0: &[u32] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + ((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +/** +Reset an existing state to the initial hash state with empty data. 
+*/ +pub fn reset_256(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha256_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +#[inline] +fn update_224_256( + state: &mut [crate::hacl_rs::streaming_types::state_32], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + if chunk_len <= 64u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf, block_state) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 64u32 + } else { + (chunk_len as 
u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len.wrapping_sub(ite).wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks( + data1_len.wrapping_div(64u32).wrapping_mul(64u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 64u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(64u32 as u64) == 0u64 && total_len10 > 0u64 + { + 64u32 + } else { + total_len10.wrapping_rem(64u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf0, block_state) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 64u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 
as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + .wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks( + data1_len.wrapping_div(64u32).wrapping_mul(64u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_256` +(since the last call to `reset_256`) exceeds 2^61-1 bytes. + +This function is identical to the update function for SHA2_224. +*/ +pub fn update_256( + state: &mut [crate::hacl_rs::streaming_types::state_32], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 32 bytes. The state remains +valid after a call to `digest_256`, meaning the user may feed more data into +the hash via `update_256`. (The digest_256 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) 
+*/ +pub fn digest_256(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 8] = [0u32; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha256_update_last( + prev_len_last.wrapping_add(r as u64), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha256_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 32 bytes. 
+*/ +pub fn hash_256(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u32; 8] = [0u32; 8usize]; + crate::hacl_rs::hash_sha2::sha256_init(&mut st); + let rem: u32 = input_len.wrapping_rem(64u32); + let len·: u64 = input_len as u64; + crate::hacl_rs::hash_sha2::sha256_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(64u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha256_update_last(len·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha256_finish(&st, rb) +} + +pub fn malloc_224() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha224_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset_224(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha224_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +pub fn update_224( + state: &mut [crate::hacl_rs::streaming_types::state_32], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 28 bytes. The state remains +valid after a call to `digest_224`, meaning the user may feed more data into +the hash via `update_224`. 
+*/ +pub fn digest_224(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 8] = [0u32; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha224_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha224_update_last( + prev_len_last.wrapping_add(r as u64), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha224_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 28 bytes. 
+*/ +pub fn hash_224(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u32; 8] = [0u32; 8usize]; + crate::hacl_rs::hash_sha2::sha224_init(&mut st); + let rem: u32 = input_len.wrapping_rem(64u32); + let len·: u64 = input_len as u64; + crate::hacl_rs::hash_sha2::sha224_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(64u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha224_update_last(len·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha224_finish(&st, rb) +} + +pub fn malloc_512() -> Box<[crate::hacl_rs::streaming_types::state_64]> { + let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); + let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha512_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + p +} + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_512`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. 
+*/ +pub fn copy_512( + state: &[crate::hacl_rs::streaming_types::state_64], +) -> Box<[crate::hacl_rs::streaming_types::state_64]> { + let block_state0: &[u64] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); + ((&mut buf)[0usize..128usize]).copy_from_slice(&buf0[0usize..128usize]); + let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); + ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); + let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset_512(state: &mut [crate::hacl_rs::streaming_types::state_64]) { + let block_state: &mut [u64] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha512_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +#[inline] +fn update_384_512( + state: &mut [crate::hacl_rs::streaming_types::state_64], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u64] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 18446744073709551615u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { + 128u32 + } else { + total_len.wrapping_rem(128u32 as u64) as u32 + }; + if chunk_len <= 128u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + 
total_len1.wrapping_rem(128u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + total_len1.wrapping_rem(128u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf, block_state) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(128u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 128u32 + } else { + (chunk_len as u64).wrapping_rem(128u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len.wrapping_sub(ite).wrapping_div(128u32); + let data1_len: u32 = n_blocks.wrapping_mul(128u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks( + data1_len.wrapping_div(128u32).wrapping_mul(128u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 128u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + total_len1.wrapping_rem(128u32 as u64) 
as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(128u32 as u64) == 0u64 && total_len10 > 0u64 + { + 128u32 + } else { + total_len10.wrapping_rem(128u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf0, block_state) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(128u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 128u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(128u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + .wrapping_div(128u32); + let data1_len: u32 = n_blocks.wrapping_mul(128u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks( + data1_len.wrapping_div(128u32).wrapping_mul(128u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_512` +(since the last call to `reset_512`) exceeds 2^125-1 bytes. + +This function is identical to the update function for SHA2_384. 
+*/ +pub fn update_512( + state: &mut [crate::hacl_rs::streaming_types::state_64], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 64 bytes. The state remains +valid after a call to `digest_512`, meaning the user may feed more data into +the hash via `update_512`. (The digest_512 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) +*/ +pub fn digest_512(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) { + let block_state: &[u64] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { + 128u32 + } else { + total_len.wrapping_rem(128u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u64; 8] = [0u64; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(128u32) == 0u32 && r > 0u32 { + 128u32 + } else { + r.wrapping_rem(128u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(prev_len_last), + fstar::uint128::uint64_to_uint128(r as u64), + ), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha512_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 64 bytes. 
+*/ +pub fn hash_512(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u64; 8] = [0u64; 8usize]; + crate::hacl_rs::hash_sha2::sha512_init(&mut st); + let rem: u32 = input_len.wrapping_rem(128u32); + let len·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(128u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha512_update_last(len·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha512_finish(&st, rb) +} + +pub fn malloc_384() -> Box<[crate::hacl_rs::streaming_types::state_64]> { + let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); + let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha384_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset_384(state: &mut [crate::hacl_rs::streaming_types::state_64]) { + let block_state: &mut [u64] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha384_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +pub fn update_384( + state: &mut [crate::hacl_rs::streaming_types::state_64], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 48 bytes. The state remains +valid after a call to `digest_384`, meaning the user may feed more data into +the hash via `update_384`. 
+*/ +pub fn digest_384(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) { + let block_state: &[u64] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { + 128u32 + } else { + total_len.wrapping_rem(128u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u64; 8] = [0u64; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(128u32) == 0u32 && r > 0u32 { + 128u32 + } else { + r.wrapping_rem(128u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha384_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(prev_len_last), + fstar::uint128::uint64_to_uint128(r as u64), + ), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha384_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 48 bytes. 
+*/ +pub fn hash_384(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u64; 8] = [0u64; 8usize]; + crate::hacl_rs::hash_sha2::sha384_init(&mut st); + let rem: u32 = input_len.wrapping_rem(128u32); + let len·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(128u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha384_update_last(len·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha384_finish(&st, rb) +} + +// END GENERATED CODE + +macro_rules! impl_hash { + ($name:ident, $digest_size:literal, $state:ty, $malloc:expr, $reset:expr, $update:expr, $finish:expr, $copy:expr, $hash:expr) => { + pub struct $name { + state: $state, + } + + impl $name { + /// Initialize a new digest state for streaming use. + pub fn new() -> $name { + $name { state: $malloc() } + } + + /// Add the `payload` to the digest. + pub fn update(&mut self, payload: &[u8]) { + $update(self.state.as_mut(), payload, payload.len() as u32); + } + + /// Get the digest. + /// + /// Note that the digest state can be continued to be used, to extend the + /// digest. + pub fn finish(&self, digest: &mut [u8]) { + $finish(self.state.as_ref(), digest); + } + + /// Return the digest for the given input byte slice, in immediate mode. 
+            pub fn hash(digest: &mut [u8], input: &[u8]) {
+                $hash(digest, input, input.len() as u32)
+            }
+        }
+
+        impl Clone for $name {
+            fn clone(&self) -> Self {
+                Self {
+                    state: $copy(self.state.as_ref()),
+                }
+            }
+        }
+    };
+}
+
+impl_hash!(
+    HaclRs_Sha2_Sha256,
+    32,
+    Box<[crate::hacl_rs::streaming_types::state_32]>,
+    malloc_256,
+    reset_256,
+    update_256,
+    digest_256,
+    copy_256,
+    hash_256
+);
diff --git a/src/hacl_rs/hkdf.rs b/src/hacl_rs/hkdf.rs
new file mode 100644
index 000000000..9b51de11b
--- /dev/null
+++ b/src/hacl_rs/hkdf.rs
@@ -0,0 +1,452 @@
+#![allow(non_snake_case)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(unused_assignments)]
+#![allow(unreachable_patterns)]
+
+/**
+Expand pseudorandom key to desired length.
+
+@param okm Pointer to `len` bytes of memory where output keying material is written to.
+@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step.
+@param prklen Length of pseudorandom key.
+@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string.
+@param infolen Length of context and application specific information.
+@param len Length of output keying material.
+*/ +pub fn expand_sha2_256( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 32u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_sha2_256( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_256( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_sha2_256( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_256( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. + +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. 
+@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_sha2_256(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_sha2_256(prk, salt, saltlen, ikm, ikmlen) +} + +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
+*/ +pub fn expand_sha2_384( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 48u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_sha2_384( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_384( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_sha2_384( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_384( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. + +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. 
+@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_sha2_384(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_sha2_384(prk, salt, saltlen, ikm, ikmlen) +} + +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
+*/ +pub fn expand_sha2_512( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 64u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_sha2_512( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_512( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_sha2_512( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_sha2_512( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. + +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. 
+@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_sha2_512(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_sha2_512(prk, salt, saltlen, ikm, ikmlen) +} + +/* no blake2 for now +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
+*/ +pub fn expand_blake2s_32( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 32u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. 
+ +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. +@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_blake2s_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_blake2s_32(prk, salt, saltlen, ikm, ikmlen) +} + +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
+*/ +pub fn expand_blake2b_32( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 64u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. 
+ +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. +@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_blake2b_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_blake2b_32(prk, salt, saltlen, ikm, ikmlen) +} +*/ diff --git a/src/hacl_rs/hmac.rs b/src/hacl_rs/hmac.rs new file mode 100644 index 000000000..a30347d58 --- /dev/null +++ b/src/hacl_rs/hmac.rs @@ -0,0 +1,764 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use super::fstar; +use super::lowstar; + +#[derive(PartialEq, Clone, Copy)] +pub(crate) struct __uint32_t_uint32_t { + pub fst: u32, + pub snd: u32, +} + +/** +Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 byte. +`dst` must point to 20 bytes of memory. 
+*/ +pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 64u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 64u32 { key_len } else { 20u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 64u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_sha1::hash_oneshot(zeroes.0, key, key_len) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut s: [u32; 5] = [ + 0x67452301u32, + 0xefcdab89u32, + 0x98badcfeu32, + 0x10325476u32, + 0xc3d2e1f0u32, + ]; + if data_len == 0u32 { + crate::hacl_rs::hash_sha1::update_last(&mut s, 0u64, &ipad, 64u32) + } else { + let block_len: u32 = 64u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: 
(&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha1::update_multi(&mut s, &ipad, 1u32); + crate::hacl_rs::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0); + crate::hacl_rs::hash_sha1::update_last( + &mut s, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem0.1, + rem_len, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_sha1::finish(&s, dst1.1); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_sha1::init(&mut s); + let block_len: u32 = 64u32; + let n_blocks: u32 = 20u32.wrapping_div(block_len); + let rem: u32 = 20u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 20u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha1::update_multi(&mut s, &opad, 1u32); + crate::hacl_rs::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0); + crate::hacl_rs::hash_sha1::update_last( + &mut s, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem0.1, + rem_len, + ); + crate::hacl_rs::hash_sha1::finish(&s, dst) +} + +/** +Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 32 bytes of memory. 
+*/ +pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 64u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 64u32 { key_len } else { 32u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 64u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_sha2::hash_256(zeroes.0, key, key_len) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut st: [u32; 8] = [0u32; 8usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize]; + let os: (&mut [u32], &mut [u32]) = st.split_at_mut(0usize); + os.1[i as usize] = x + }); + let s: &mut [u32] = &mut st; + if data_len == 0u32 { + crate::hacl_rs::hash_sha2::sha256_update_last( + 0u64.wrapping_add(64u32 as u64), + 64u32, + &ipad, + s, + ) + } else { + let block_len: u32 = 64u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let 
n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &ipad, s); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha256_update_last( + (64u32 as u64) + .wrapping_add(full_blocks_len as u64) + .wrapping_add(rem_len as u64), + rem_len, + rem0.1, + s, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_sha2::sha256_finish(s, dst1.1); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_sha2::sha256_init(s); + let block_len: u32 = 64u32; + let n_blocks: u32 = 32u32.wrapping_div(block_len); + let rem: u32 = 32u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 32u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &opad, s); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha256_update_last( + (64u32 as u64) + .wrapping_add(full_blocks_len as u64) + .wrapping_add(rem_len as u64), + rem_len, + rem0.1, + s, + ); + crate::hacl_rs::hash_sha2::sha256_finish(s, dst) +} + +/** +Write the 
HMAC-SHA-2-384 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 48 bytes of memory. +*/ +pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 128u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 128u32 { key_len } else { 48u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 128u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_sha2::hash_384(zeroes.0, key, key_len) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut st: [u64; 8] = [0u64; 8usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize]; + let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize); + os.1[i as usize] = x + }); + let s: &mut [u64] = &mut st; + if data_len == 0u32 { + crate::hacl_rs::hash_sha2::sha384_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(0u64), + fstar::uint128::uint64_to_uint128(128u32 as u64), + ), + 128u32, + &ipad, + s, + ) + } else { + let block_len: u32 = 128u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if 
n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &ipad, s); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha384_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_sha2::sha384_finish(s, dst1.1); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_sha2::sha384_init(s); + let block_len: u32 = 128u32; + let n_blocks: u32 = 48u32.wrapping_div(block_len); + let rem: u32 = 48u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 48u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = 
full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &opad, s); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha384_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ); + crate::hacl_rs::hash_sha2::sha384_finish(s, dst) +} + +/** +Write the HMAC-SHA-2-512 MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 64 bytes of memory. +*/ +pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 128u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 128u32 { key_len } else { 64u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 128u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_sha2::hash_512(zeroes.0, key, key_len) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut st: [u64; 8] = [0u64; 8usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = 
(&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize); + os.1[i as usize] = x + }); + let s: &mut [u64] = &mut st; + if data_len == 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(0u64), + fstar::uint128::uint64_to_uint128(128u32 as u64), + ), + 128u32, + &ipad, + s, + ) + } else { + let block_len: u32 = 128u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &ipad, s); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_sha2::sha512_finish(s, dst1.1); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_sha2::sha512_init(s); + let block_len: u32 = 128u32; + let n_blocks: u32 = 64u32.wrapping_div(block_len); + let rem: u32 = 
64u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 64u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &opad, s); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ); + crate::hacl_rs::hash_sha2::sha512_finish(s, dst) +} + +/* no blake2 for now + +/** +Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 32 bytes of memory. 
+*/ +pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 64u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 64u32 { key_len } else { 32u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 64u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_blake2s::hash_with_key(zeroes.0, 32u32, key, key_len, &[], 0u32) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut s: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::init(&mut s, 0u32, 32u32); + let s0: &mut [u32] = &mut s; + if data_len == 0u32 { + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last(64u32, &mut wv, s0, false, 0u64, 64u32, &ipad) + } else { + let block_len: u32 = 64u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = 
n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &ipad, 1u32); + let mut wv0: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi( + n_blocks0.wrapping_mul(64u32), + &mut wv0, + s0, + block_len as u64, + rem0.0, + n_blocks0, + ); + let mut wv1: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last( + rem_len, + &mut wv1, + s0, + false, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem_len, + rem0.1, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_blake2s::finish(32u32, dst1.1, s0); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_blake2s::init(s0, 0u32, 32u32); + let block_len: u32 = 64u32; + let n_blocks: u32 = 32u32.wrapping_div(block_len); + let rem: u32 = 32u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 32u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &opad, 1u32); + let mut wv0: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi( + n_blocks0.wrapping_mul(64u32), + &mut wv0, + s0, + block_len as u64, + 
rem0.0, + n_blocks0, + ); + let mut wv1: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last( + rem_len, + &mut wv1, + s0, + false, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem_len, + rem0.1, + ); + crate::hacl_rs::hash_blake2s::finish(32u32, dst, s0) +} + +/** +Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 64 bytes of memory. +*/ +pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 128u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 128u32 { key_len } else { 64u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 128u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_blake2b::hash_with_key(zeroes.0, 64u32, key, key_len, &[], 0u32) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut s: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::init(&mut s, 0u32, 64u32); + let s0: &mut [u64] = &mut s; + if data_len == 0u32 { + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + 128u32, + &mut wv, + s0, + false, + fstar::uint128::uint64_to_uint128(0u64), + 128u32, + &ipad, + ) + } else { + let 
block_len: u32 = 128u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: data_len.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + 128u32, + &mut wv, + s0, + fstar::uint128::uint64_to_uint128(0u64), + &ipad, + 1u32, + ); + let mut wv0: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + n_blocks0.wrapping_mul(128u32), + &mut wv0, + s0, + fstar::uint128::uint64_to_uint128(block_len as u64), + rem0.0, + n_blocks0, + ); + let mut wv1: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + rem_len, + &mut wv1, + s0, + false, + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + rem_len, + rem0.1, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_blake2b::finish(64u32, dst1.1, s0); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_blake2b::init(s0, 0u32, 64u32); + let block_len: u32 = 128u32; + let n_blocks: u32 = 64u32.wrapping_div(block_len); + let rem: u32 = 64u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocks·: u32 = n_blocks.wrapping_sub(1u32); + 
crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks·, + snd: 64u32.wrapping_sub(n_blocks·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + 128u32, + &mut wv, + s0, + fstar::uint128::uint64_to_uint128(0u64), + &opad, + 1u32, + ); + let mut wv0: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + n_blocks0.wrapping_mul(128u32), + &mut wv0, + s0, + fstar::uint128::uint64_to_uint128(block_len as u64), + rem0.0, + n_blocks0, + ); + let mut wv1: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + rem_len, + &mut wv1, + s0, + false, + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + rem_len, + rem0.1, + ); + crate::hacl_rs::hash_blake2b::finish(64u32, dst, s0) +} +*/ diff --git a/src/hacl_rs/lowstar.rs b/src/hacl_rs/lowstar.rs new file mode 100644 index 000000000..f63af5cbe --- /dev/null +++ b/src/hacl_rs/lowstar.rs @@ -0,0 +1,2 @@ +pub mod endianness; +pub mod ignore; diff --git a/src/hacl_rs/lowstar/endianness.rs b/src/hacl_rs/lowstar/endianness.rs new file mode 100644 index 000000000..00d3ea9c5 --- /dev/null +++ b/src/hacl_rs/lowstar/endianness.rs @@ -0,0 +1,53 @@ +use std::convert::TryInto; + +// Little Endian + +pub fn load16_le(bytes: &[u8]) -> u16 { + u16::from_le_bytes(bytes[0..2].try_into().unwrap()) +} + +pub fn store16_le(bytes: &mut[u8], x: u16) { + bytes[0..2].copy_from_slice(&u16::to_le_bytes(x)) +} + +pub fn load32_le(bytes: &[u8]) -> u32 { + 
u32::from_le_bytes(bytes[0..4].try_into().unwrap()) +} + +pub fn store32_le(bytes: &mut[u8], x: u32) { + bytes[0..4].copy_from_slice(&u32::to_le_bytes(x)) +} + +pub fn load64_le(bytes: &[u8]) -> u64 { + u64::from_le_bytes(bytes[0..8].try_into().unwrap()) +} + +pub fn store64_le(bytes: &mut[u8], x: u64) { + bytes[0..8].copy_from_slice(&u64::to_le_bytes(x)) +} + +// Big Endian + +pub fn load32_be(bytes: &[u8]) -> u32 { + u32::from_be_bytes(bytes[0..4].try_into().unwrap()) +} + +pub fn store32_be(bytes: &mut[u8], x: u32) { + bytes[0..4].copy_from_slice(&u32::to_be_bytes(x)) +} + +pub fn load64_be(bytes: &[u8]) -> u64 { + u64::from_be_bytes(bytes[0..8].try_into().unwrap()) +} + +pub fn store64_be(bytes: &mut[u8], x: u64) { + bytes[0..8].copy_from_slice(&u64::to_be_bytes(x)) +} + +pub fn load128_be(bytes: &[u8]) -> u128 { + u128::from_be_bytes(bytes[0..16].try_into().unwrap()) +} + +pub fn store128_be(bytes: &mut[u8], x: u128) { + bytes[0..16].copy_from_slice(&u128::to_be_bytes(x)) +} diff --git a/src/hacl_rs/lowstar/ignore.rs b/src/hacl_rs/lowstar/ignore.rs new file mode 100644 index 000000000..919eb52f9 --- /dev/null +++ b/src/hacl_rs/lowstar/ignore.rs @@ -0,0 +1 @@ +pub fn ignore<T>(_: T) {} diff --git a/src/hacl_rs/mod.rs b/src/hacl_rs/mod.rs new file mode 100644 index 000000000..aa1ef1d51 --- /dev/null +++ b/src/hacl_rs/mod.rs @@ -0,0 +1,9 @@ +// Utility modules. In the generated hacl-rs, these are individual crates.
+mod fstar; +mod lowstar; + +pub(crate) mod hash_sha1; +pub(crate) mod hash_sha2; +//pub(crate) mod hkdf; +pub(crate) mod hmac; +pub(crate) mod streaming_types; diff --git a/src/hacl_rs/streaming_types.rs b/src/hacl_rs/streaming_types.rs new file mode 100644 index 000000000..886aad904 --- /dev/null +++ b/src/hacl_rs/streaming_types.rs @@ -0,0 +1,41 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +#[derive(PartialEq, Clone, Copy)] +pub enum hash_alg +{ + SHA2_224, + SHA2_256, + SHA2_384, + SHA2_512, + SHA1, + MD5, + Blake2S, + Blake2B, + SHA3_256, + SHA3_224, + SHA3_384, + SHA3_512, + Shake128, + Shake256 +} + +#[derive(PartialEq, Clone, Copy)] +pub enum error_code +{ + Success, + InvalidAlgorithm, + InvalidLength, + MaximumLengthExceeded +} + +#[derive(PartialEq, Clone)] +pub struct state_32 +{ pub block_state: Box<[u32]>, pub buf: Box<[u8]>, pub total_len: u64 } + +#[derive(PartialEq, Clone)] +pub struct state_64 +{ pub block_state: Box<[u64]>, pub buf: Box<[u8]>, pub total_len: u64 } diff --git a/src/lib.rs b/src/lib.rs index 8642eaa91..cf2e6304b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ pub(crate) mod jasmin; // HACL pub(crate) mod hacl; +pub(crate) mod hacl_rs; // libcrux pub mod aead; diff --git a/tests/sha2.rs b/tests/sha2.rs index 1e504ff3e..ac1e44ded 100644 --- a/tests/sha2.rs +++ b/tests/sha2.rs @@ -4,8 +4,9 @@ #[test] fn sha256_kat_streaming() { let mut digest = libcrux::digest::Sha2_256::new(); + let mut d = [0u8; 32]; digest.update(b"libcrux sha2 256 tests"); - let d = digest.finish(); + digest.finish(&mut d); let expected = "8683520e19e5b33db33c8fb90918c0c96fcdfd9a17c695ce0f0ea2eaa0c95956"; assert_eq!(hex::encode(&d), expected); @@ -38,8 +39,10 @@ fn sha2_clone() { let mut hasher256_2 = hasher_256.clone(); hasher_256.update(b"more 256"); hasher256_2.update(b"more 256"); - let digest = hasher_256.finish(); - let digest_2 = 
hasher256_2.finish(); + let mut digest = [0u8; 32]; + let mut digest_2 = [0u8; 32]; + hasher_256.finish(&mut digest); + hasher256_2.finish(&mut digest_2); assert_eq!(digest, digest_2); assert_eq!(digest, libcrux::digest::sha2_256(b"test 256more 256"));