From 159f7ad4880359a0e99d167a02d7ffa4d099799c Mon Sep 17 00:00:00 2001 From: Isaac Holt Date: Wed, 18 Dec 2024 15:54:14 +0000 Subject: [PATCH] add new float functionality, rewrite casting code --- Cargo.toml | 3 +- README.md | 2 +- TODO.txt | 40 +- bench/Cargo.toml | 7 +- bench/benches/benchmark.rs | 256 ++++------ bench/benches/float.rs | 42 ++ bench/benches/unzip.rs | 23 + bench/open-report.sh | 1 + changes/v0.12.0 | 2 +- changes/v0.13.0 | 2 + scripts/quickcheck_test_name.sh | 2 +- src/bint/endian.rs | 12 +- src/bint/numtraits.rs | 6 +- src/buint/as_float.rs | 248 --------- src/buint/cast.rs | 86 ++-- src/buint/checked.rs | 195 +------ src/buint/div.rs | 205 ++++++++ src/buint/endian.rs | 18 +- src/buint/float_as.rs | 205 -------- src/buint/mod.rs | 9 +- src/buint/mul.rs | 79 +++ src/buint/numtraits.rs | 7 +- src/buint/overflowing.rs | 35 -- src/cast/float/float_from_uint.rs | 73 +++ src/cast/float/mod.rs | 244 +++++++++ src/cast/float/uint_from_float.rs | 81 +++ src/{cast.rs => cast/mod.rs} | 5 +- src/doc/bigint_helpers.rs | 2 +- src/doc/checked.rs | 4 +- src/doc/classify.rs | 18 + src/doc/cmp.rs | 16 + src/doc/consts.rs | 17 + src/doc/endian.rs | 10 +- src/doc/math.rs | 15 + src/doc/mod.rs | 73 ++- src/doc/overflowing.rs | 2 +- src/doc/rounding.rs | 15 + src/doc/saturating.rs | 2 +- src/doc/strict.rs | 2 +- src/doc/unchecked.rs | 2 +- src/doc/wrapping.rs | 4 +- src/float/cast.rs | 205 ++++---- src/float/classify.rs | 73 ++- src/float/cmp.rs | 78 +-- src/float/const_trait_fillers.rs | 55 ++ src/float/consts.rs | 110 ++-- src/float/convert.rs | 257 +++++++++- src/float/endian.rs | 38 +- src/float/math.rs | 783 ----------------------------- src/float/math/mod.rs | 112 +++++ src/float/math/recip.rs | 178 +++++++ src/float/math/remquof.rs | 103 ++++ src/float/math/sqrt.rs | 79 +++ src/float/mod.rs | 424 ++++------------ src/float/numtraits.rs | 77 +++ src/float/ops.rs | 810 ------------------------------ src/float/ops/add.rs | 119 +++++ src/float/ops/div.rs | 295 +++++++++++ src/float/ops/mod.rs | 150 ++++++ src/float/ops/mul.rs | 149 ++++++ src/float/ops/rem.rs | 120 +++++ src/float/ops/sub.rs | 140 ++++++ src/float/parse.rs | 190 +++++++ src/float/random.rs | 86 ++++ src/float/rounding copy.rs | 334 ++++++++++++ src/float/rounding.rs | 281 +++++++++++ src/helpers.rs | 106 ++++ src/int/numtraits.rs | 56 ++- src/lib.rs | 10 +- src/test/convert.rs | 36 +- src/test/mod.rs | 2 + src/test/types.rs | 24 +- 72 files changed, 4396 insertions(+), 3154 deletions(-) create mode 100644 bench/benches/float.rs create mode 100644 bench/benches/unzip.rs create mode 100644 bench/open-report.sh create mode 100644 changes/v0.13.0 delete mode 100644 src/buint/as_float.rs create mode 100644 src/buint/div.rs delete mode 100644 src/buint/float_as.rs create mode 100644 src/buint/mul.rs create mode 100644 src/cast/float/float_from_uint.rs create mode 100644 src/cast/float/mod.rs create mode 100644 src/cast/float/uint_from_float.rs rename src/{cast.rs => cast/mod.rs} (97%) create mode 100644 src/doc/classify.rs create mode 100644 src/doc/cmp.rs create mode 100644 src/doc/math.rs create mode 100644 src/doc/rounding.rs create mode 100644 src/float/const_trait_fillers.rs delete mode 100644 src/float/math.rs create mode 100644 src/float/math/mod.rs create mode 100644 src/float/math/recip.rs create mode 100644 src/float/math/remquof.rs create mode 100644 src/float/math/sqrt.rs create mode 100644 src/float/numtraits.rs delete mode 100644 src/float/ops.rs create mode 100644 src/float/ops/add.rs create mode 100644 
src/float/ops/div.rs create mode 100644 src/float/ops/mod.rs create mode 100644 src/float/ops/mul.rs create mode 100644 src/float/ops/rem.rs create mode 100644 src/float/ops/sub.rs create mode 100644 src/float/parse.rs create mode 100644 src/float/random.rs create mode 100644 src/float/rounding copy.rs create mode 100644 src/float/rounding.rs create mode 100644 src/helpers.rs diff --git a/Cargo.toml b/Cargo.toml index 5d5987d..2d1f1b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,13 @@ exclude = ["src/float/*", "src/tests", "TODO.txt", "lit-parser/*"] # TODO: make [features] default = [] nightly = [] +float = [] serde = ["dep:serde", "serde-big-array"] numtraits = ["num-integer", "num-traits"] [dependencies] num-integer = { version = "0.1", optional = true, default-features = false } -num-traits = { version = "0.2", optional = true, default-features = false } +num-traits = { version = "0.2.19", optional = true, default-features = false } serde = { version = "1.0", features = ["derive"], optional = true, default-features = false } serde-big-array = { version = "0.5", optional = true, default-features = false } rand = { version = "0.8", features = ["min_const_gen"], optional = true, default-features = false } diff --git a/README.md b/README.md index bcfb131..e062f5b 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,7 @@ The `valuable` feature enables the [`Valuable`](https://docs.rs/valuable/latest/ ### Nightly features -Activating the `nightly` feature will enable the `from_be_bytes`, `from_le_bytes`, `from_ne_bytes`, `to_be_bytes`, `to_le_bytes` and `to_ne_bytes` methods on `bnum`'s unsigned and signed integers and will make the `unchecked_...` methods `const`. This comes at the cost of only being able to compile on nightly. The nightly features that this uses are [`generic_const_exprs`](https://github.com/rust-lang/rust/issues/76560), [`const_trait_impl`](https://github.com/rust-lang/rust/issues/67792), [`effects`](https://github.com/rust-lang/rust/issues/102090) and [`const_option`](https://github.com/rust-lang/rust/issues/67441). +Activating the `nightly` feature will enable the `from_be_bytes`, `from_le_bytes`, `from_ne_bytes`, `to_be_bytes`, `to_le_bytes` and `to_ne_bytes` methods on `bnum`'s unsigned and signed integers and will make the `unchecked_...` methods `const`. This comes at the cost of only being able to compile on nightly. The nightly features that this uses are [`generic_const_exprs`](https://github.com/rust-lang/rust/issues/76560), [`const_trait_impl`](https://github.com/rust-lang/rust/issues/67792) and [`const_option`](https://github.com/rust-lang/rust/issues/67441). 
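A rough usage sketch of the byte-conversion methods described in the paragraph above (assuming the `U256` alias exported from `bnum::types`; requires the `nightly` feature and a nightly compiler):

// Round-trip a 256-bit unsigned integer through its big-endian byte representation.
use bnum::types::U256;

fn round_trip(n: U256) -> U256 {
    let bytes = n.to_be_bytes(); // big-endian byte array, [u8; 32] for U256
    U256::from_be_bytes(bytes)
}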
## Testing diff --git a/TODO.txt b/TODO.txt index f89b397..4a19359 100644 --- a/TODO.txt +++ b/TODO.txt @@ -2,12 +2,14 @@ Floats: * Conversions from and to: * primitive floats * bnum floats -* FromStr trait +* FromStr trait: REMEMBER: the num_traits crate has a general from_str_radix method for floats, could use if stuck * Display, debug, upper exp, lower exp traits * Transcendental functions: * exp * exp2 + * exp_m1 * ln + * ln_1p * log * log2 * log10 @@ -21,8 +23,6 @@ Floats: * atan * atan2 * sin_cos - * exp_m1 - * ln_1p * sinh * cosh * tanh @@ -31,14 +31,19 @@ Floats: * atanh * to_degrees * to_radians + * powf + * gamma + * ln_gamma +* Other functions: + * mul_add + * midpoint + * recip * Optimised division algorithm depending on size of mantissa -* recip * Optimised multiplication algorithm depending on size of mantissa -* Fused multiply add * Constants: - * DIGITS - * MIN_10_EXP - * MAX_10_EXP + * DIGITS. For this, we can divide MB by f64::LOG2_10 and take floor (roughly speaking). + * MIN_10_EXP. For this, we can divide MIN_EXP by f64::LOG2_10, and take floor (roughly speaking) + * MAX_10_EXP. For this, we can divide MAX_EXP by f64::LOG2_10, and take floor (roughly speaking) * Maths constants: * E * FRAC_1_PI @@ -62,11 +67,20 @@ Floats: * From/TryFrom trait for ints, other floats * Float type aliases from IEEE standard: f16, f32, f64, f80, f128. (Include f32 and f64 as allows const methods which aren't available on the primitives) * Serde -* Rand -* num_traits::{Bounded, Float, FloatConst, AsPrimitive, FromPrimitive, ToPrimitive, One, Zero, Inv, MulAdd, MulAddAssign, Pow, Signed, Euclid, Num} +* Rand: + * gen_range stuff +* num_traits::{Bounded, Float, FloatConst, FloatCore, AsPrimitive, FromPrimitive, ToPrimitive, ConstZero, ConstOne, One, Zero, FromBytes, ToBytes, Inv, MulAdd, MulAddAssign, Pow, Signed, Euclid, Num} * Division algorithm which doesn't need where clause Ints: -Faster mulitplication algorithm for larger integers -Faster division algorithms for larger integers -Update serde to use decimal string instead of struct debug - but CHECK that all serde options serialise primitive ints as decimal strings \ No newline at end of file +* Use new Rust const capabilities to write more idiomatic code (e.g. we don't need the option_expect! macro anymore). Note though that maybe we should wait a bit to keep the MSRV not too recent +* Faster mulitplication algorithm for larger integers +* Faster division algorithms for larger integers +* Update serde to use decimal string instead of struct debug - but CHECK that all serde options serialise primitive ints as decimal strings +* FromBytes and ToBytes num_traits traits - only when can implement without "unconstrained generic constant" error + +Other stuff: +* Think about removing BTryFrom and just implementing TryFrom (no From for now), then can use CastFrom/As trait for Result-less conversions +* Replace bitors, bitands, shifts, masks etc. with more efficient implementations (e.g. 
using set_bit, flip_bit, one-less-than-power-of-two methods, methods for efficiently generating masks/getting certain range of bits of integer) +* Consider putting floats and signed integers behind optional features (which are enabled by default) +* Add 16 bit and 32 bit width types to the test widths, so test u16, u32, f16, f32 as well (just make the digit sizes that are too wide not do anything for those tests) \ No newline at end of file diff --git a/bench/Cargo.toml b/bench/Cargo.toml index b4808bd..7551c49 100644 --- a/bench/Cargo.toml +++ b/bench/Cargo.toml @@ -7,12 +7,17 @@ license = "MIT OR Apache-2.0" [dev-dependencies] criterion = { version = "0.5" } -bnum = { path = "../", features = ["rand", "nightly"] } +bnum = { path = "../", features = ["rand", "nightly", "float"] } num-traits = "0.2" uint = "0.9" rand = { version = "0.8", features = ["std", "std_rng"] } paste = "1.0" +[[bench]] +name = "float" +# path = "benches/float/mod.rs" +harness = false + [[bench]] name = "benchmark" harness = false diff --git a/bench/benches/benchmark.rs b/bench/benches/benchmark.rs index 5fb96cd..dc602ee 100644 --- a/bench/benches/benchmark.rs +++ b/bench/benches/benchmark.rs @@ -6,48 +6,16 @@ use core::iter::Iterator; use criterion::black_box; use rand::prelude::*; +mod unzip; +use unzip::unzip2; + +// use super::unzip2; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; const SAMPLE_SIZE: usize = 10000; -macro_rules! unzip { - (fn $name: ident <$($Gen: ident), *>) => { - paste::paste! { - fn $name<$($Gen), *, I>(i: I) -> ($(Vec<$Gen>), *) - where I: Iterator - { - let ($(mut []), *) = match i.size_hint().1 { - Some(size) => ($(Vec::<$Gen>::with_capacity(size)), *), - None => ($(Vec::<$Gen>::new()), *), - }; - i.for_each(|($([<$Gen:lower>]), *)| { - $( - [].push([<$Gen:lower>]); - )* - }); - ($([]), *) - } - } - }; -} - -// unzip!(fn unzip3); -unzip!(fn unzip2); - -// mod uuint { -// uint::construct_uint! { -// pub struct UU128(2); -// } - -// uint::construct_uint! { -// pub struct UU512(8); -// } -// } - -// use uuint::*; - macro_rules! bench_against_primitive { - { $primitive: ty; $($method: ident ($($param: ident : $(ref $re: tt)? $ty: ty), *);) * } => { + { $primitive: ty; $group_name: ident; $($method: ident ($($param: ident : $(ref $re: tt)? $ty: ty), *);) * } => { paste::paste! { $( fn [](c: &mut Criterion) { @@ -64,7 +32,9 @@ macro_rules! bench_against_primitive { let big_inputs = inputs.0; let prim_inputs = inputs.1; // let ruint_inputs = inputs.2; + const SIZE_ID: &'static str = stringify!([<$primitive:upper>]); + group.bench_with_input(BenchmarkId::new("bnum", SIZE_ID), &big_inputs, |b, inputs| { b.iter(|| { for ($($param), *, ()) in inputs.iter().cloned() { @@ -90,7 +60,7 @@ macro_rules! bench_against_primitive { group.finish(); } )* - criterion_group!([<$primitive _benches>], $([]), *); + criterion_group!($group_name, $([]), *); } }; } @@ -106,149 +76,129 @@ trait Format { fn lower_exp(self) -> String; } -// macro_rules! 
impl_format { -// ($($ty: ty), *) => { -// $( -// impl Format for $ty { -// fn display(self) -> String { -// format!("{}", self) -// } -// fn debug(self) -> String { -// format!("{:?}", self) -// } -// fn binary(self) -> String { -// format!("{:b}", self) -// } -// fn upper_hex(self) -> String { -// format!("{:X}", self) -// } -// fn lower_hex(self) -> String { -// format!("{:x}", self) -// } -// fn octal(self) -> String { -// format!("{:o}", self) -// } -// fn upper_exp(self) -> String { -// format!("{:E}", self) -// } -// fn lower_exp(self) -> String { -// format!("{:e}", self) -// } -// } -// )* -// }; -// } - -// impl_format!(u128, BU128); - -// use core::cmp::{PartialEq, PartialOrd}; -// use core::ops::{BitAnd, BitOr, BitXor, Not}; - -// // use num_traits::PrimInt; - -// trait BenchFrom { -// fn from(value: T) -> Self; -// } +macro_rules! impl_format { + ($($ty: ty), *) => { + $( + impl Format for $ty { + fn display(self) -> String { + format!("{}", self) + } + fn debug(self) -> String { + format!("{:?}", self) + } + fn binary(self) -> String { + format!("{:b}", self) + } + fn upper_hex(self) -> String { + format!("{:X}", self) + } + fn lower_hex(self) -> String { + format!("{:x}", self) + } + fn octal(self) -> String { + format!("{:o}", self) + } + fn upper_exp(self) -> String { + format!("{:E}", self) + } + fn lower_exp(self) -> String { + format!("{:e}", self) + } + } + )* + }; +} -// impl<'a> BenchFrom<&'a BU128> for &'a UU128 { -// fn from(value: &'a BU128) -> Self { -// unsafe { -// &*(value as *const BU128 as *const UU128) -// } -// } -// } +impl_format!(u128, U128); -// impl BenchFrom for UU128 { -// fn from(value: u128) -> Self { -// From::from(value) -// } -// } +use core::cmp::{PartialEq, PartialOrd}; +// use core::ops::{BitAnd, BitOr, BitXor, Not}; bench_against_primitive! 
{ - u128; - from_be_bytes(a: [u8; 16]); + u128; u128_benches; + checked_add(a: u128, b: u128); - // checked_add_signed(a: u128, b: i128); + checked_add_signed(a: u128, b: i128); checked_sub(a: u128, b: u128); checked_mul(a: u128, b: u128); checked_div(a: u128, b: u128); - // checked_div_euclid(a: u128, b: u128); + checked_div_euclid(a: u128, b: u128); checked_rem(a: u128, b: u128); - // checked_rem_euclid(a: u128, b: u128); + checked_rem_euclid(a: u128, b: u128); checked_neg(a: u128); - // checked_shl(a: u128, b: u32); - // checked_shr(a: u128, b: u32); + checked_shl(a: u128, b: u32); + checked_shr(a: u128, b: u32); checked_pow(a: u128, b: u32); - // checked_next_multiple_of(a: u128, b: u128); - // checked_ilog2(a: u128); - // checked_ilog10(a: u128); - // checked_ilog(a: u128, b: u128); - // checked_next_power_of_two(a: u128); - - // from_be(a: u128); - // from_le(a: u128); - // to_be(a: u128); - // to_le(a: u128); - // to_be_bytes(a: u128); - // to_le_bytes(a: u128); - // to_ne_bytes(a: u128); - // from_be_bytes(a: [u8; 128 / 8]); - // from_le_bytes(a: [u8; 128 / 8]); - // from_ne_bytes(a: [u8; 128 / 8]); + checked_next_multiple_of(a: u128, b: u128); + checked_ilog2(a: u128); + checked_ilog10(a: u128); + checked_ilog(a: u128, b: u128); + checked_next_power_of_two(a: u128); + + from_be(a: u128); + from_le(a: u128); + to_be(a: u128); + to_le(a: u128); + to_be_bytes(a: u128); + to_le_bytes(a: u128); + to_ne_bytes(a: u128); + from_be_bytes(a: [u8; 128 / 8]); + from_le_bytes(a: [u8; 128 / 8]); + from_ne_bytes(a: [u8; 128 / 8]); overflowing_add(a: u128, b: u128); - // overflowing_add_signed(a: u128, b: i128); + overflowing_add_signed(a: u128, b: i128); overflowing_sub(a: u128, b: u128); overflowing_mul(a: u128, b: u128); overflowing_neg(a: u128); - // overflowing_shl(a: u128, b: u32); - // overflowing_shr(a: u128, b: u32); + overflowing_shl(a: u128, b: u32); + overflowing_shr(a: u128, b: u32); overflowing_pow(a: u128, b: u32); - // display(a: u128); - // debug(a: u128); - // binary(a: u128); - // upper_hex(a: u128); - // lower_hex(a: u128); - // octal(a: u128); - // upper_exp(a: u128); - // lower_exp(a: u128); + display(a: u128); + debug(a: u128); + binary(a: u128); + upper_hex(a: u128); + lower_hex(a: u128); + octal(a: u128); + upper_exp(a: u128); + lower_exp(a: u128); saturating_add(a: u128, b: u128); - // saturating_add_signed(a: u128, b: i128); + saturating_add_signed(a: u128, b: i128); saturating_sub(a: u128, b: u128); saturating_mul(a: u128, b: u128); - // saturating_pow(a: u128, exp: u32); - - // wrapping_add(a: u128, b: u128); - // wrapping_add_signed(a: u128, b: i128); - // wrapping_sub(a: u128, b: u128); - // wrapping_mul(a: u128, b: u128); - // wrapping_neg(a: u128); - // wrapping_shl(a: u128, rhs: u32); - // wrapping_shr(a: u128, rhs: u32); - // wrapping_pow(a: u128, exp: u32); - // wrapping_next_power_of_two(a: u128); - - // count_ones(a: u128); - // count_zeros(a: u128); - // leading_zeros(a: u128); - // trailing_zeros(a: u128); - // leading_ones(a: u128); - // trailing_ones(a: u128); - // rotate_left(a: u128, b: u32); - // rotate_right(a: u128, b: u32); - // swap_bytes(a: u128); - // reverse_bits(a: u128); - // is_power_of_two(a: u128); + saturating_pow(a: u128, exp: u32); + + wrapping_add(a: u128, b: u128); + wrapping_add_signed(a: u128, b: i128); + wrapping_sub(a: u128, b: u128); + wrapping_mul(a: u128, b: u128); + wrapping_neg(a: u128); + wrapping_shl(a: u128, rhs: u32); + wrapping_shr(a: u128, rhs: u32); + wrapping_pow(a: u128, exp: u32); + wrapping_next_power_of_two(a: 
u128); + + count_ones(a: u128); + count_zeros(a: u128); + leading_zeros(a: u128); + trailing_zeros(a: u128); + leading_ones(a: u128); + trailing_ones(a: u128); + rotate_left(a: u128, b: u32); + rotate_right(a: u128, b: u32); + swap_bytes(a: u128); + reverse_bits(a: u128); + is_power_of_two(a: u128); // bitand(a: u128, b: u128); // bitor(a: u128, b: u128); // bitxor(a: u128, b: u128); // not(a: u128); - // eq(a: ref &u128, b: ref &u128); - // partial_cmp(a: ref &u128, b: ref &u128); + eq(a: ref &u128, b: ref &u128); + partial_cmp(a: ref &u128, b: ref &u128); } criterion_main!(u128_benches); diff --git a/bench/benches/float.rs b/bench/benches/float.rs new file mode 100644 index 0000000..8d2bd2e --- /dev/null +++ b/bench/benches/float.rs @@ -0,0 +1,42 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use rand::prelude::*; + +mod unzip; + +const SAMPLE_SIZE: usize = 10000; + +use bnum::Float; + +type F64 = Float<8, 52>; +type F32 = Float<4, 23>; + + +fn bench_fibs(c: &mut Criterion) { + let mut group = c.benchmark_group("round"); + let mut rng = rand::rngs::StdRng::seed_from_u64(0); + let inputs = (0..SAMPLE_SIZE) + .map(|_| rng.gen::()) + .map(|a| a.to_bits()) + .map(|a| (f64::from_bits(a), F64::from_bits(a.into()))); + let (prim_inputs, big_inputs) = unzip::unzip2(inputs); + + // group.bench_with_input(BenchmarkId::new("Recursive", "new"), &big_inputs, |b, inputs| b.iter(|| { + // for a in inputs.iter().cloned() { + // let _ = black_box(a).floor(); + // } + // })); + group.bench_with_input(BenchmarkId::new("Iterative", "old"), &big_inputs, |b, inputs| b.iter(|| { + inputs.iter().cloned().for_each(|a| { + let _ = black_box(a).trunc(); + }) + })); + group.bench_with_input(BenchmarkId::new("Iterative", "prim"), &prim_inputs, |b, inputs| b.iter(|| { + inputs.iter().cloned().for_each(|a| { + let _ = black_box(a).trunc(); + }) + })); + group.finish(); +} + +criterion_group!(benches, bench_fibs); +criterion_main!(benches); \ No newline at end of file diff --git a/bench/benches/unzip.rs b/bench/benches/unzip.rs new file mode 100644 index 0000000..aff3009 --- /dev/null +++ b/bench/benches/unzip.rs @@ -0,0 +1,23 @@ +macro_rules! unzip { + (fn $name: ident <$($Gen: ident), *>) => { + paste::paste! { + pub fn $name<$($Gen), *, I>(i: I) -> ($(Vec<$Gen>), *) + where I: Iterator + { + let ($(mut []), *) = match i.size_hint().1 { + Some(size) => ($(Vec::<$Gen>::with_capacity(size)), *), + None => ($(Vec::<$Gen>::new()), *), + }; + i.for_each(|($([<$Gen:lower>]), *)| { + $( + [].push([<$Gen:lower>]); + )* + }); + ($([]), *) + } + } + }; +} + +// unzip!(fn unzip3); +unzip!(fn unzip2); \ No newline at end of file diff --git a/bench/open-report.sh b/bench/open-report.sh new file mode 100644 index 0000000..81fa94a --- /dev/null +++ b/bench/open-report.sh @@ -0,0 +1 @@ +open target/criterion/$1/report/index.html \ No newline at end of file diff --git a/changes/v0.12.0 b/changes/v0.12.0 index 4086aab..42effd6 100644 --- a/changes/v0.12.0 +++ b/changes/v0.12.0 @@ -1,5 +1,5 @@ - Add optional borsh support -- Add `digits_mut` method to unsigned integers. +- Add `digits_mut` and `set_bit` method to unsigned integers. - `As` and `CastFrom` traits no longer const, as const trait support removed from latest nightly. - `cast_signed` method for unsigned integers. - `cast_unsigned` method for signed integers. 
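A minimal sketch (not part of this patch) of the bit-level behaviour that the new `set_bit` changelog entry above refers to, shown on a single `u64` digit; the name `set_bit_u64` and its signature are hypothetical, and bnum's actual method operates on the full multi-digit integer:

// Hypothetical digit-level helper: set or clear bit `index` of `digit`.
fn set_bit_u64(digit: u64, index: u32, value: bool) -> u64 {
    if value {
        digit | (1u64 << index)
    } else {
        digit & !(1u64 << index)
    }
}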
diff --git a/changes/v0.13.0 b/changes/v0.13.0 new file mode 100644 index 0000000..9741646 --- /dev/null +++ b/changes/v0.13.0 @@ -0,0 +1,2 @@ +Add ConstZero and ConstOne (from num_traits) for ints +(maybe?) implementation of int cast to float was incorrect, now fixed \ No newline at end of file diff --git a/scripts/quickcheck_test_name.sh b/scripts/quickcheck_test_name.sh index b063c35..1d8c213 100644 --- a/scripts/quickcheck_test_name.sh +++ b/scripts/quickcheck_test_name.sh @@ -1,4 +1,4 @@ -export QUICKCHECK_TESTS=1000000 +export QUICKCHECK_TESTS=100000 iters=0 while true do diff --git a/src/bint/endian.rs b/src/bint/endian.rs index 27034bd..f8f084f 100644 --- a/src/bint/endian.rs +++ b/src/bint/endian.rs @@ -169,7 +169,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_be_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { + pub const fn to_be_bytes(self) -> [u8; $BUint::::BYTES_USIZE] { self.bits.to_be_bytes() } @@ -178,7 +178,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_le_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { + pub const fn to_le_bytes(self) -> [u8; $BUint::::BYTES_USIZE] { self.bits.to_le_bytes() } @@ -187,7 +187,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_ne_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { + pub const fn to_ne_bytes(self) -> [u8; $BUint::::BYTES_USIZE] { self.bits.to_ne_bytes() } @@ -196,7 +196,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_be_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_be_bytes(bytes: [u8; $BUint::::BYTES_USIZE]) -> Self { Self::from_bits($BUint::from_be_bytes(bytes)) } @@ -205,7 +205,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_le_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_le_bytes(bytes: [u8; $BUint::::BYTES_USIZE]) -> Self { Self::from_bits($BUint::from_le_bytes(bytes)) } @@ -214,7 +214,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_ne_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_ne_bytes(bytes: [u8; $BUint::::BYTES_USIZE]) -> Self { Self::from_bits($BUint::from_ne_bytes(bytes)) } } diff --git a/src/bint/numtraits.rs b/src/bint/numtraits.rs index a893362..4fe3733 100644 --- a/src/bint/numtraits.rs +++ b/src/bint/numtraits.rs @@ -160,9 +160,9 @@ use crate::ExpType; use num_integer::{Integer, Roots}; use num_traits::{ AsPrimitive, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, - CheckedShr, CheckedSub, CheckedEuclid, Euclid, FromPrimitive, MulAdd, MulAddAssign, Num, One, Pow, PrimInt, + CheckedShr, CheckedSub, CheckedEuclid, Euclid, FromPrimitive, MulAdd, MulAddAssign, Num, One, ConstOne, Pow, PrimInt, Saturating, SaturatingAdd, SaturatingMul, SaturatingSub, Signed, ToPrimitive, WrappingAdd, - WrappingMul, WrappingNeg, WrappingShl, WrappingShr, WrappingSub, Zero, + WrappingMul, WrappingNeg, WrappingShl, WrappingShr, WrappingSub, Zero, ConstZero }; use crate::cast::CastFrom; @@ -170,7 +170,7 @@ use crate::int::numtraits::num_trait_impl; macro_rules! 
numtraits { ($BUint: ident, $BInt: ident, $Digit: ident) => { - crate::int::numtraits::impls!($BInt, $BUint, $BInt); + crate::int::numtraits::impls!($BInt, $BUint, $BInt, $Digit); impl FromPrimitive for $BInt { from_uint!($Digit; u8, from_u8); diff --git a/src/buint/as_float.rs b/src/buint/as_float.rs deleted file mode 100644 index 3261564..0000000 --- a/src/buint/as_float.rs +++ /dev/null @@ -1,248 +0,0 @@ -use crate::ExpType; -use crate::cast::CastFrom; - -// #[cfg_attr(feature = "nightly", const_trait)] -pub trait CastToFloatHelper: Copy { - const ZERO: Self; - const BITS: ExpType; - - fn is_zero(&self) -> bool; - fn bits(&self) -> ExpType; - fn bit(&self, index: ExpType) -> bool; - fn trailing_zeros(self) -> ExpType; - fn shr(self, n: ExpType) -> Self; -} - -macro_rules! impl_helper_uint { - ($($uint: ty), *) => { - $( - crate::nightly::const_impl! { - impl const CastToFloatHelper for $uint { - const ZERO: Self = 0; - const BITS: ExpType = Self::BITS as ExpType; - - #[inline] - fn is_zero(&self) -> bool { - *self == 0 - } - - #[inline] - fn bits(&self) -> ExpType { - (<$uint>::BITS - self.leading_zeros()) as ExpType - } - - #[inline] - fn bit(&self, index: ExpType) -> bool { - *self & (1 << index) != 0 - } - - #[inline] - fn trailing_zeros(self) -> ExpType { - Self::trailing_zeros(self) as ExpType - } - - #[inline] - fn shr(self, n: ExpType) -> Self { - self >> n - } - } - } - )* - }; -} - -impl_helper_uint!(u8, u16, u32, u64, u128, usize); - -macro_rules! impl_helper_buint { - ($BUint: ident, $BInt: ident, $Digit: ident) => { - crate::nightly::const_impl! { - impl const CastToFloatHelper for $BUint { - const ZERO: Self = Self::ZERO; - const BITS: ExpType = Self::BITS; - - #[inline] - fn is_zero(&self) -> bool { - Self::is_zero(&self) - } - - #[inline] - fn bits(&self) -> ExpType { - Self::bits(&self) - } - - #[inline] - fn bit(&self, index: ExpType) -> bool { - Self::bit(&self, index) - } - - #[inline] - fn trailing_zeros(self) -> ExpType { - Self::trailing_zeros(self) - } - - #[inline] - fn shr(self, n: ExpType) -> Self { - Self::shr(self, n) - } - } - } - }; -} - -pub(crate) use impl_helper_buint; - -crate::macro_impl!(impl_helper_buint); - -// #[cfg_attr(feature = "nightly", const_trait)] -pub trait CastToFloatConsts { - type M: Mantissa; - - const ZERO: Self; - const MANTISSA_DIGITS: ExpType; - const MAX_EXP: Self::M; - const INFINITY: Self; - //const MANTISSA_MASK: Self::M = (Self::M::MAX.shr(Self::BITS - (Self::MANTISSA_DIGITS - 1))); - - fn from_raw_parts(exp: Self::M, mant: Self::M) -> Self; -} - -macro_rules! cast_to_float_consts { - ($($f: ty; $u: ty), *) => { - $( - // crate::nightly::const_impl! 
{ - impl CastToFloatConsts for $f { - type M = $u; - - const ZERO: Self = 0.0; - const MANTISSA_DIGITS: ExpType = <$f>::MANTISSA_DIGITS as ExpType; - const MAX_EXP: Self::M = <$f>::MAX_EXP as $u; - const INFINITY: Self = <$f>::INFINITY; - - #[inline] - fn from_raw_parts(exp: Self::M, mant: Self::M) -> Self { - const MANTISSA_MASK: $u = <$u>::MAX >> (<$u>::BITS - (<$f>::MANTISSA_DIGITS - 1)); - <$f>::from_bits((exp << Self::MANTISSA_DIGITS - 1) | mant & MANTISSA_MASK) - } - } - // } - )* - }; -} - -cast_to_float_consts!(f32; u32, f64; u64); - -// #[cfg_attr(feature = "nightly", const_trait)] -pub trait Mantissa { - const ONE: Self; - const TWO: Self; - const MAX: Self; - const BITS: ExpType; - - fn bit(&self, n: ExpType) -> bool; - fn shl(self, n: ExpType) -> Self; - fn shr(self, n: ExpType) -> Self; - fn add(self, rhs: Self) -> Self; - fn sub(self, rhs: Self) -> Self; - fn leading_zeros(self) -> ExpType; - // fn bitand(self, rhs: Self) -> Self; - fn gt(&self, rhs: &Self) -> bool; -} - -macro_rules! impl_mantissa_for_uint { - ($($uint: ty), *) => { - $( - crate::nightly::const_impl! { - impl const Mantissa for $uint { - const ONE: Self = 1; - const TWO: Self = 2; - const MAX: Self = Self::MAX; - const BITS: ExpType = Self::BITS as ExpType; - - #[inline] - fn bit(&self, n: ExpType) -> bool { - *self & (1 << n) != 0 - } - - #[inline] - fn shl(self, n: ExpType) -> Self { - self << n - } - - #[inline] - fn shr(self, n: ExpType) -> Self { - self >> n - } - - #[inline] - fn add(self, rhs: Self) -> Self { - self + rhs - } - - #[inline] - fn sub(self, rhs: Self) -> Self { - self - rhs - } - - #[inline] - fn leading_zeros(self) -> ExpType { - Self::leading_zeros(self) as ExpType - } - - // #[inline] - // fn bitand(self, rhs: Self) -> Self { - // self & rhs - // } - - #[inline] - fn gt(&self, rhs: &Self) -> bool { - *self > *rhs - } - } - } - )* - }; -} - -impl_mantissa_for_uint!(u32, u64); - -pub fn cast_float_from_uint(from: U) -> F -where - F: CastToFloatConsts, - U: CastToFloatHelper, - F::M: CastFrom + CastFrom + Copy -{ - if from.is_zero() { - return F::ZERO; - } - let bits = from.bits(); - let mut mant = if U::BITS > F::M::BITS { - if bits < F::MANTISSA_DIGITS { - F::M::cast_from(from).shl(F::MANTISSA_DIGITS - bits) - } else { - F::M::cast_from(from.shr(bits - F::MANTISSA_DIGITS)) - } - } else if bits < F::MANTISSA_DIGITS { - F::M::cast_from(from).shl(F::MANTISSA_DIGITS - bits) - } else { - F::M::cast_from(from).shr(bits - F::MANTISSA_DIGITS) - }; - let mut round_up = true; - if bits <= F::MANTISSA_DIGITS - || !from.bit(bits - (F::MANTISSA_DIGITS + 1)) - || (!mant.bit(0) - && from.trailing_zeros() == bits - (F::MANTISSA_DIGITS + 1)) - { - round_up = false; - } - let mut exp = F::M::cast_from(bits).add(F::MAX_EXP).sub(F::M::TWO); - if round_up { - mant = mant.add(F::M::ONE); - if mant.leading_zeros() == F::M::BITS - (F::MANTISSA_DIGITS + 1) { - exp = exp.add(F::M::ONE); - } - } - if exp.gt(&(F::MAX_EXP.shl(1)).sub(F::M::ONE)) { - return F::INFINITY; - } - F::from_raw_parts(exp, mant) -} \ No newline at end of file diff --git a/src/buint/cast.rs b/src/buint/cast.rs index 0d303dd..9af8348 100644 --- a/src/buint/cast.rs +++ b/src/buint/cast.rs @@ -45,46 +45,8 @@ macro_rules! 
buint_as_float { impl CastFrom<$BUint> for $f { #[must_use = doc::must_use_op!()] #[inline] - fn cast_from(from: $BUint) -> Self { - crate::buint::as_float::cast_float_from_uint(from) - /*if from.is_zero() { - return 0.0; - } - let bits = from.bits(); - let mut mant = if $BUint::::BITS > <$u>::BITS { - if bits < <$f>::MANTISSA_DIGITS { - <$u>::cast_from(from) << (<$f>::MANTISSA_DIGITS - bits) - } else { - <$u>::cast_from(from >> (bits - <$f>::MANTISSA_DIGITS)) - } - } else if bits < <$f>::MANTISSA_DIGITS { - <$u>::cast_from(from) << (<$f>::MANTISSA_DIGITS - bits) - } else { - <$u>::cast_from(from) >> (bits - <$f>::MANTISSA_DIGITS) - }; - let mut round_up = true; - if bits <= <$f>::MANTISSA_DIGITS - || !from.bit(bits - (<$f>::MANTISSA_DIGITS + 1)) - || (mant & 1 == 0 - && from.trailing_zeros() == bits - (<$f>::MANTISSA_DIGITS + 1)) - { - round_up = false; - } - let mut exp = bits as $u + (<$f>::MAX_EXP - 1) as $u - 1; - if round_up { - mant += 1; - if mant.leading_zeros() == <$u>::BITS - (<$f>::MANTISSA_DIGITS + 1) { - exp += 1; - } - } - if exp > 2 * (<$f>::MAX_EXP as $u) - 1 { - return <$f>::INFINITY; - } - let mant = <$u>::cast_from(mant); - <$f>::from_bits( - (exp << (<$f>::MANTISSA_DIGITS - 1)) - | (mant & (<$u>::MAX >> (<$u>::BITS - (<$f>::MANTISSA_DIGITS as u32 - 1)))), - )*/ + fn cast_from(value: $BUint) -> Self { + crate::cast::float::cast_float_from_uint(value) } } }; @@ -126,8 +88,44 @@ use crate::doc; use crate::nightly::impl_const; // use core::mem::MaybeUninit; +use crate::ExpType; +use crate::cast::float::{FloatMantissa, CastUintFromFloatHelper, CastFloatFromUintHelper}; + macro_rules! cast { ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl FloatMantissa for $BUint { + const ZERO: Self = Self::ZERO; + const ONE: Self = Self::ONE; + const TWO: Self = Self::TWO; + const MAX: Self = Self::MAX; + + #[inline] + fn leading_zeros(self) -> ExpType { + Self::leading_zeros(self) + } + + #[inline] + fn checked_shr(self, n: ExpType) -> Option { + Self::checked_shr(self, n) + } + + #[inline] + fn is_power_of_two(self) -> bool { + Self::is_power_of_two(self) + } + } + + impl CastUintFromFloatHelper for $BUint { + const MAX: Self = Self::MAX; + const MIN: Self = Self::MIN; + } + + impl CastFloatFromUintHelper for $BUint { + fn trailing_zeros(self) -> ExpType { + Self::trailing_zeros(self) + } + } + impl $BUint { crate::nightly::const_fn! { #[inline] @@ -223,16 +221,16 @@ macro_rules! cast { impl CastFrom for $BUint { #[must_use = doc::must_use_op!()] #[inline] - fn cast_from(from: f32) -> Self { - crate::buint::float_as::uint_cast_from_float(from) + fn cast_from(value: f32) -> Self { + crate::cast::float::cast_uint_from_float(value) } } impl CastFrom for $BUint { #[must_use = doc::must_use_op!()] #[inline] - fn cast_from(from: f64) -> Self { - crate::buint::float_as::uint_cast_from_float(from) + fn cast_from(value: f64) -> Self { + crate::cast::float::cast_uint_from_float(value) } } diff --git a/src/buint/checked.rs b/src/buint/checked.rs index f30120b..260dd11 100644 --- a/src/buint/checked.rs +++ b/src/buint/checked.rs @@ -48,200 +48,7 @@ macro_rules! 
checked { } (out, rem) } - const fn basecase_div_rem(self, mut v: Self, n: usize) -> (Self, Self) { - // The Art of Computer Programming Volume 2 by Donald Knuth, Section 4.3.1, Algorithm D - - let mut q = Self::ZERO; - let m = self.last_digit_index() + 1 - n; - let shift = v.digits[n - 1].leading_zeros() as ExpType; - - v = unsafe { - Self::unchecked_shl_internal(v, shift) - }; // D1 - - struct Remainder { - first: $Digit, - rest: [$Digit; M], - } - impl Remainder { - const fn digit(&self, index: usize) -> $Digit { - if index == 0 { - self.first - } else { - self.rest[index - 1] - } - } - const fn shr(self, shift: ExpType) -> $BUint { - let mut out = $BUint::ZERO; - let mut i = 0; - while i < M { - out.digits[i] = self.digit(i) >> shift; - i += 1; - } - if shift > 0 { - i = 0; - while i < M { - out.digits[i] |= self.rest[i] << (digit::$Digit::BITS as ExpType - shift); - i += 1; - } - } - out - } - const fn new(uint: $BUint, shift: ExpType) -> Self { - let first = uint.digits[0] << shift; - let rest = uint.wrapping_shr(digit::$Digit::BITS - shift); - Self { - first, - rest: rest.digits, - } - } - /*crate::nightly::const_fns! { - const fn set_digit(&mut self, index: usize, digit: $Digit) -> () { - if index == 0 { - self.first = digit; - } else { - self.rest[index - 1] = digit; - } - } - const fn sub(&mut self, rhs: Mul, start: usize, range: usize) -> bool { - let mut borrow = false; - let mut i = 0; - while i <= range { - let (sub, overflow) = digit::$Digit::borrowing_sub(self.digit(i + start), rhs.digit(i), borrow); - self.set_digit(i + start, sub); - borrow = overflow; - i += 1; - } - borrow - } - const fn add(&mut self, rhs: $BUint, start: usize, range: usize) -> () { - let mut carry = false; - let mut i = 0; - while i < range { - let (sum, overflow) = digit::$Digit::carrying_add(self.digit(i + start), rhs.digits[i], carry); - self.set_digit(i + start, sum); - carry = overflow; - i += 1; - } - if carry { - self.set_digit(range + start, self.digit(range + start).wrapping_add(1)); // we use wrapping_add here, not regular addition as a carry will always occur to the left of self.digit(range + start) - } - } - }*/ - const fn sub(mut self, rhs: Mul, start: usize, range: usize) -> (Self, bool) { - let mut borrow = false; - let mut i = 0; - while i <= range { - let (sub, overflow) = digit::$Digit::borrowing_sub(self.digit(i + start), rhs.digit(i), borrow); - if start == 0 && i == 0 { - self.first = sub; - } else { - self.rest[i + start - 1] = sub; - } - borrow = overflow; - i += 1; - } - (self, borrow) - } - const fn add(mut self, rhs: $BUint, start: usize, range: usize) -> Self { - let mut carry = false; - let mut i = 0; - while i < range { - let (sum, overflow) = digit::$Digit::carrying_add(self.digit(i + start), rhs.digits[i], carry); - if start == 0 && i == 0 { - self.first = sum; - } else { - self.rest[i + start - 1] = sum; - } - carry = overflow; - i += 1; - } - if carry { - if start == 0 && range == 0 { - self.first = self.first.wrapping_add(1); - } else { - self.rest[range + start - 1] = self.rest[range + start - 1].wrapping_add(1); - } - } - self - } - } - - #[derive(Clone, Copy)] - struct Mul { - last: $Digit, - rest: [$Digit; M], - } - impl Mul { - const fn new(uint: $BUint, rhs: $Digit) -> Self { - let mut rest = [0; M]; - let mut carry: $Digit = 0; - let mut i = 0; - while i < M { - let (prod, c) = digit::$Digit::carrying_mul(uint.digits[i], rhs, carry, 0); - carry = c; - rest[i] = prod; - i += 1; - } - Self { - last: carry, - rest, - } - } - const fn digit(&self, index: usize) -> 
$Digit { - if index == M { - self.last - } else { - self.rest[index] - } - } - } - - let v_n_m1 = v.digits[n - 1]; - let v_n_m2 = v.digits[n - 2]; - - let mut u = Remainder::new(self, shift); - - let mut j = m + 1; // D2 - while j > 0 { - j -= 1; // D7 - - let u_jn = u.digit(j + n); - - #[inline] - const fn tuple_gt(a: ($Digit, $Digit), b: ($Digit, $Digit)) -> bool { - a.1 > b.1 || a.1 == b.1 && a.0 > b.0 - } - - // q_hat will be either `q` or `q + 1` - let mut q_hat = if u_jn < v_n_m1 { - let (mut q_hat, r_hat) = digit::$Digit::div_rem_wide(u.digit(j + n - 1), u_jn, v_n_m1); // D3 - - if tuple_gt(digit::$Digit::widening_mul(q_hat, v_n_m2), (u.digit(j + n - 2), r_hat as $Digit)) { - q_hat -= 1; - - if let Some(r_hat) = r_hat.checked_add(v_n_m1) { // this checks if `r_hat <= b`, where `b` is the digit base - if tuple_gt(digit::$Digit::widening_mul(q_hat, v_n_m2), (u.digit(j + n - 2), r_hat as $Digit)) { - q_hat -= 1; - } - } - } - q_hat - } else { - // `u[j + n - 1] >= v[n - 1]` so we know that estimate for q_hat would be larger than `Digit::MAX`. This is either equal to `q` or `q + 1` (very unlikely to be `q + 1`). - $Digit::MAX - }; - let (u_new, overflow) = u.sub(Mul::new(v, q_hat), j, n); // D4 - u = u_new; - - if overflow { // D5 - unlikely, probability of this being true is ~ 2 / b where b is the digit base (i.e. `Digit::MAX + 1`) - q_hat -= 1; - u = u.add(v, j, n); - } - q.digits[j] = q_hat; - } - (q, u.shr(shift)) - } + #[inline] pub(crate) const fn div_rem_unchecked(self, rhs: Self) -> (Self, Self) { diff --git a/src/buint/div.rs b/src/buint/div.rs new file mode 100644 index 0000000..b988ef2 --- /dev/null +++ b/src/buint/div.rs @@ -0,0 +1,205 @@ +use crate::ExpType; +use crate::digit; + +macro_rules! div_impl { + ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl $BUint { + pub(crate) const fn basecase_div_rem(self, mut v: Self, n: usize) -> (Self, Self) { + // The Art of Computer Programming Volume 2 by Donald Knuth, Section 4.3.1, Algorithm D + + let mut q = Self::ZERO; + let m = self.last_digit_index() + 1 - n; + let shift = v.digits[n - 1].leading_zeros() as ExpType; + + v = unsafe { + Self::unchecked_shl_internal(v, shift) + }; // D1 + + struct Remainder { + first: $Digit, + rest: [$Digit; M], + } + impl Remainder { + const fn digit(&self, index: usize) -> $Digit { + if index == 0 { + self.first + } else { + self.rest[index - 1] + } + } + const fn shr(self, shift: ExpType) -> $BUint { + let mut out = $BUint::ZERO; + let mut i = 0; + while i < M { + out.digits[i] = self.digit(i) >> shift; + i += 1; + } + if shift > 0 { + i = 0; + while i < M { + out.digits[i] |= self.rest[i] << (digit::$Digit::BITS as ExpType - shift); + i += 1; + } + } + out + } + const fn new(uint: $BUint, shift: ExpType) -> Self { + let first = uint.digits[0] << shift; + let rest = uint.wrapping_shr(digit::$Digit::BITS - shift); + Self { + first, + rest: rest.digits, + } + } + /*crate::nightly::const_fns! 
{ + const fn set_digit(&mut self, index: usize, digit: $Digit) -> () { + if index == 0 { + self.first = digit; + } else { + self.rest[index - 1] = digit; + } + } + const fn sub(&mut self, rhs: Mul, start: usize, range: usize) -> bool { + let mut borrow = false; + let mut i = 0; + while i <= range { + let (sub, overflow) = digit::$Digit::borrowing_sub(self.digit(i + start), rhs.digit(i), borrow); + self.set_digit(i + start, sub); + borrow = overflow; + i += 1; + } + borrow + } + const fn add(&mut self, rhs: $BUint, start: usize, range: usize) -> () { + let mut carry = false; + let mut i = 0; + while i < range { + let (sum, overflow) = digit::$Digit::carrying_add(self.digit(i + start), rhs.digits[i], carry); + self.set_digit(i + start, sum); + carry = overflow; + i += 1; + } + if carry { + self.set_digit(range + start, self.digit(range + start).wrapping_add(1)); // we use wrapping_add here, not regular addition as a carry will always occur to the left of self.digit(range + start) + } + } + }*/ + const fn sub(mut self, rhs: Mul, start: usize, range: usize) -> (Self, bool) { + let mut borrow = false; + let mut i = 0; + while i <= range { + let (sub, overflow) = digit::$Digit::borrowing_sub(self.digit(i + start), rhs.digit(i), borrow); + if start == 0 && i == 0 { + self.first = sub; + } else { + self.rest[i + start - 1] = sub; + } + borrow = overflow; + i += 1; + } + (self, borrow) + } + const fn add(mut self, rhs: $BUint, start: usize, range: usize) -> Self { + let mut carry = false; + let mut i = 0; + while i < range { + let (sum, overflow) = digit::$Digit::carrying_add(self.digit(i + start), rhs.digits[i], carry); + if start == 0 && i == 0 { + self.first = sum; + } else { + self.rest[i + start - 1] = sum; + } + carry = overflow; + i += 1; + } + if carry { + if start == 0 && range == 0 { + self.first = self.first.wrapping_add(1); + } else { + self.rest[range + start - 1] = self.rest[range + start - 1].wrapping_add(1); + } + } + self + } + } + + #[derive(Clone, Copy)] + struct Mul { + last: $Digit, + rest: [$Digit; M], + } + impl Mul { + const fn new(uint: $BUint, rhs: $Digit) -> Self { + let mut rest = [0; M]; + let mut carry: $Digit = 0; + let mut i = 0; + while i < M { + let (prod, c) = digit::$Digit::carrying_mul(uint.digits[i], rhs, carry, 0); + carry = c; + rest[i] = prod; + i += 1; + } + Self { + last: carry, + rest, + } + } + const fn digit(&self, index: usize) -> $Digit { + if index == M { + self.last + } else { + self.rest[index] + } + } + } + + let v_n_m1 = v.digits[n - 1]; + let v_n_m2 = v.digits[n - 2]; + + let mut u = Remainder::new(self, shift); + + let mut j = m + 1; // D2 + while j > 0 { + j -= 1; // D7 + + let u_jn = u.digit(j + n); + + #[inline] + const fn tuple_gt(a: ($Digit, $Digit), b: ($Digit, $Digit)) -> bool { + a.1 > b.1 || a.1 == b.1 && a.0 > b.0 + } + + // q_hat will be either `q` or `q + 1` + let mut q_hat = if u_jn < v_n_m1 { + let (mut q_hat, r_hat) = digit::$Digit::div_rem_wide(u.digit(j + n - 1), u_jn, v_n_m1); // D3 + + if tuple_gt(digit::$Digit::widening_mul(q_hat, v_n_m2), (u.digit(j + n - 2), r_hat as $Digit)) { + q_hat -= 1; + + if let Some(r_hat) = r_hat.checked_add(v_n_m1) { // this checks if `r_hat <= b`, where `b` is the digit base + if tuple_gt(digit::$Digit::widening_mul(q_hat, v_n_m2), (u.digit(j + n - 2), r_hat as $Digit)) { + q_hat -= 1; + } + } + } + q_hat + } else { + // `u[j + n - 1] >= v[n - 1]` so we know that estimate for q_hat would be larger than `Digit::MAX`. This is either equal to `q` or `q + 1` (very unlikely to be `q + 1`). 
+ $Digit::MAX + }; + let (u_new, overflow) = u.sub(Mul::new(v, q_hat), j, n); // D4 + u = u_new; + + if overflow { // D5 - unlikely, probability of this being true is ~ 2 / b where b is the digit base (i.e. `Digit::MAX + 1`) + q_hat -= 1; + u = u.add(v, j, n); + } + q.digits[j] = q_hat; + } + (q, u.shr(shift)) + } + } + }; +} + +crate::macro_impl!(div_impl); \ No newline at end of file diff --git a/src/buint/endian.rs b/src/buint/endian.rs index 532f847..71936d9 100644 --- a/src/buint/endian.rs +++ b/src/buint/endian.rs @@ -175,8 +175,8 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_be_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { - let mut bytes = [0; N * digit::$Digit::BYTES as usize]; + pub const fn to_be_bytes(self) -> [u8; Self::BYTES_USIZE] { + let mut bytes = [0; Self::BYTES_USIZE]; let mut i = N; while i > 0 { let digit_bytes = self.digits[N - i].to_be_bytes(); @@ -195,12 +195,12 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_le_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { + pub const fn to_le_bytes(self) -> [u8; Self::BYTES_USIZE] { // Strangely, this is slightly faster than direct transmutation by either `mem::transmute_copy` or `ptr::read`. // Also, initialising the bytes with zeros is faster than using MaybeUninit. // The Rust compiler is probably being very smart and optimizing this code. // The same goes for `to_be_bytes`. - let mut bytes = [0; N * digit::$Digit::BYTES as usize]; + let mut bytes = [0; Self::BYTES_USIZE]; let mut i = 0; while i < N { let digit_bytes = self.digits[i].to_le_bytes(); @@ -219,7 +219,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_ne_bytes(self) -> [u8; N * digit::$Digit::BYTES as usize] { + pub const fn to_ne_bytes(self) -> [u8; Self::BYTES_USIZE] { #[cfg(target_endian = "big")] return self.to_be_bytes(); #[cfg(not(target_endian = "big"))] @@ -231,7 +231,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_be_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_be_bytes(bytes: [u8; Self::BYTES_USIZE]) -> Self { let mut out = Self::ZERO; // let arr_ptr = bytes.as_ptr(); let mut i = 0; @@ -254,7 +254,7 @@ macro_rules! endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_le_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_le_bytes(bytes: [u8; Self::BYTES_USIZE]) -> Self { let mut out = Self::ZERO; // let arr_ptr = bytes.as_ptr(); let mut i = 0; @@ -277,13 +277,15 @@ macro_rules! 
endian { #[doc = doc::requires_feature!("nightly")] #[must_use] #[inline] - pub const fn from_ne_bytes(bytes: [u8; N * digit::$Digit::BYTES as usize]) -> Self { + pub const fn from_ne_bytes(bytes: [u8; Self::BYTES_USIZE]) -> Self { #[cfg(target_endian = "big")] return Self::from_be_bytes(bytes); #[cfg(not(target_endian = "big"))] Self::from_le_bytes(bytes) } + + pub(crate) const BYTES_USIZE: usize = N * digit::$Digit::BYTES as usize; } #[cfg(test)] diff --git a/src/buint/float_as.rs b/src/buint/float_as.rs deleted file mode 100644 index 37349d6..0000000 --- a/src/buint/float_as.rs +++ /dev/null @@ -1,205 +0,0 @@ -use crate::ExpType; -use crate::cast::CastFrom; - -pub trait Mantissa: Sized { - const ZERO: Self; - - fn checked_shr(self, n: ExpType) -> Option; - fn bits(&self) -> ExpType; -} - -pub trait Exponent { - fn is_negative(&self) -> bool; - fn neg(self) -> Self; -} - -pub trait CastUintFromFloatConsts { - const ZERO: Self; - const MIN: Self; - const MAX: Self; - const BITS: ExpType; - - fn shl(self, n: ExpType) -> Self; -} - -pub trait CastUintFromFloatHelper: CastFrom { - type M: Mantissa; - type E: Exponent; - - fn is_nan(&self) -> bool; - fn is_sign_negative(&self) -> bool; - fn is_infinite(&self) -> bool; - fn decode(self) -> (Self::M, Self::E); -} - -macro_rules! cast_uint_from_float_consts { - ($($uint: ident), *) => { - $( - impl CastUintFromFloatConsts for $uint { - const ZERO: Self = 0; - const MIN: Self = Self::MIN; - const MAX: Self = Self::MAX; - const BITS: ExpType = Self::BITS; - - fn shl(self, n: ExpType) -> Self { - self << n - } - } - )* - }; -} - -pub(crate) use cast_uint_from_float_consts; - -macro_rules! as_float { - ($BUint: ident, $BInt: ident, $Digit: ident) => { - impl Mantissa for $BUint { - const ZERO: Self = Self::ZERO; - - #[inline] - fn checked_shr(self, n: ExpType) -> Option { - Self::checked_shr(self, n) - } - - #[inline] - fn bits(&self) -> ExpType { - Self::bits(&self) - } - } - - impl Exponent for $BInt { - #[inline] - fn is_negative(&self) -> bool { - Self::is_negative(*self) - } - - #[inline] - fn neg(self) -> Self { - Self::neg(self) - } - } - - impl CastUintFromFloatConsts for $BUint { - const ZERO: Self = Self::ZERO; - const MIN: Self = Self::MIN; - const MAX: Self = Self::MAX; - const BITS: ExpType = Self::BITS; - - fn shl(self, n: ExpType) -> Self { - Self::shl(self, n) - } - } - }; -} - -pub(crate) use as_float; - -pub fn uint_cast_from_float(f: F) -> U -where - F: CastUintFromFloatHelper, - U: CastUintFromFloatConsts + CastFrom, - ExpType: TryFrom -{ - if f.is_nan() { - return U::ZERO; - } - if f.is_sign_negative() { - return U::MIN; - } - if f.is_infinite() { - return U::MAX; - } - let (mut mant, exp) = f.decode(); - if exp.is_negative() { - let r: Result = exp.neg().try_into(); - mant = match r { - Ok(exp) => mant.checked_shr(exp).unwrap_or(::ZERO), - _ => ::ZERO, - }; - if mant.bits() > U::BITS { - return U::MAX; - } - U::cast_from(mant) - } else { - let exp: Result = exp.try_into(); - match exp { - Ok(exp) => { - if mant.bits() + exp > U::BITS { - return U::MAX; - } - U::cast_from(mant).shl(exp) - }, - _ => U::MAX - } - } -} - -crate::macro_impl!(as_float); - -crate::buint::float_as::cast_uint_from_float_consts!(u8, u16, u32, u64, u128, usize); - -macro_rules! 
impl_mantissa_for_uint { - ($($uint: ty), *) => { - $( - impl Mantissa for $uint { - const ZERO: Self = 0; - - #[inline] - fn checked_shr(self, n: ExpType) -> Option { - self.checked_shr(n as u32) - } - - #[inline] - fn bits(&self) -> ExpType { - (Self::BITS - self.leading_zeros()) as ExpType - } - } - )* - } -} - -impl_mantissa_for_uint!(u32, u64); - -impl Exponent for i16 { - #[inline] - fn is_negative(&self) -> bool { - Self::is_negative(*self) - } - - #[inline] - fn neg(self) -> Self { - -self - } -} - -macro_rules! impl_cast_uint_from_primitive_float_helper { - ($float: ty, $mant: ty, $exp: ty, $decoder: ident) => { - impl CastUintFromFloatHelper for $float { - type M = $mant; - type E = $exp; - - #[inline] - fn is_nan(&self) -> bool { - Self::is_nan(*self) - } - - #[inline] - fn is_sign_negative(&self) -> bool { - Self::is_sign_negative(*self) - } - - #[inline] - fn is_infinite(&self) -> bool { - Self::is_infinite(*self) - } - - #[inline] - fn decode(self) -> (Self::M, Self::E) { - crate::buint::cast::$decoder(self) - } - } - }; -} - -impl_cast_uint_from_primitive_float_helper!(f32, u32, i16, decode_f32); -impl_cast_uint_from_primitive_float_helper!(f64, u64, i16, decode_f64); \ No newline at end of file diff --git a/src/buint/mod.rs b/src/buint/mod.rs index ecec99a..02b0724 100644 --- a/src/buint/mod.rs +++ b/src/buint/mod.rs @@ -508,6 +508,11 @@ macro_rules! mod_impl { out } + #[inline(always)] + pub(crate) const fn digit(&self, index: usize) -> $Digit { + self.digits[index] + } + /// Returns the digits stored in `self` as an array. Digits are little endian (least significant digit first). #[must_use] #[inline(always)] @@ -725,7 +730,6 @@ macro_rules! mod_impl { crate::macro_impl!(mod_impl); -pub mod float_as; mod bigint_helpers; pub mod cast; mod checked; @@ -733,9 +737,10 @@ mod cmp; mod const_trait_fillers; mod consts; mod convert; +mod div; mod endian; -pub mod as_float; mod fmt; +mod mul; #[cfg(feature = "numtraits")] mod numtraits; mod ops; diff --git a/src/buint/mul.rs b/src/buint/mul.rs new file mode 100644 index 0000000..c4b8562 --- /dev/null +++ b/src/buint/mul.rs @@ -0,0 +1,79 @@ +use crate::digit; + +macro_rules! 
mul { + ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl $BUint { + #[inline] + pub(super) const fn long_mul(self, rhs: Self) -> (Self, bool) { + let mut overflow = false; + let mut out = Self::ZERO; + let mut carry: $Digit; + + let mut i = 0; + while i < N { + carry = 0; + let mut j = 0; + while j < N { + let index = i + j; + if index < N { + let (prod, c) = digit::$Digit::carrying_mul( + self.digits[i], + rhs.digits[j], + carry, + out.digits[index], + ); + out.digits[index] = prod; + carry = c; + } else if self.digits[i] != 0 && rhs.digits[j] != 0 { + overflow = true; + break; + } + j += 1; + } + if carry != 0 { + overflow = true; + } + i += 1; + } + (out, overflow) + } + } + } +} + +// fn karatsuba(a: BUintD8, b: BUintD8, start_index: usize, end_index: usize) -> BUintD8 { +// if a.last_digit_index() == 0 { +// return b * a.digits[0]; +// } +// if b.last_digit_index() == 0 { +// return a * b.digits[0]; +// } +// let mid_index = (start_index + end_index) / 2; // TODO: replace this with midpoint +// let z2 = karatsuba(a, b, mid_index, end_index); +// let z0 = karatsuba(a, b, end_index, mid_index); +// // let d1 = abs_diff(x0, x1); +// // let d2 = abs_diff(y0, y1); +// // +// // let a = karatsuba((x0 - x1)(y1 - y0)) +// // let z1 = a + z2 + +// todo!() +// } + +// fn karat_widening(x: &mut [u8], y: &mut [u8]) -> (&mut [u8], &mut [u8]) { +// debug_assert_eq!(x.len(), y.len()); +// if x.len() == 1 { +// let (a, b) = (x[0], y[0]); +// let (c, d) = a.widening_mul(b); +// x[0] = c; +// y[0] = d; +// return (x, y); +// } +// let m = x.len().div_ceil(2); // midpoint index +// let (x0, x1) = x.split_at_mut(m); +// let (y0, y1) = y.split_at_mut(m); +// let (a1karat_widening(x1, y1); +// karat_widening(x0, y0); +// } + +crate::macro_impl!(mul); \ No newline at end of file diff --git a/src/buint/numtraits.rs b/src/buint/numtraits.rs index d136551..b408895 100644 --- a/src/buint/numtraits.rs +++ b/src/buint/numtraits.rs @@ -55,9 +55,9 @@ use crate::ExpType; use num_integer::{Integer, Roots}; use num_traits::{ AsPrimitive, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, - CheckedShr, CheckedSub, CheckedEuclid, Euclid, FromPrimitive, MulAdd, MulAddAssign, Num, One, Pow, PrimInt, + CheckedShr, CheckedSub, CheckedEuclid, Euclid, FromPrimitive, MulAdd, MulAddAssign, Num, One, ConstOne, Pow, PrimInt, Saturating, SaturatingAdd, SaturatingMul, SaturatingSub, ToPrimitive, Unsigned, WrappingAdd, - WrappingMul, WrappingNeg, WrappingShl, WrappingShr, WrappingSub, Zero, + WrappingMul, WrappingNeg, WrappingShl, WrappingShr, WrappingSub, Zero, ConstZero }; use crate::cast::CastFrom; @@ -65,7 +65,7 @@ use crate::int::numtraits::num_trait_impl; macro_rules! numtraits { ($BUint: ident, $BInt: ident, $Digit: ident) => { - crate::int::numtraits::impls!($BUint, $BUint, $BInt); + crate::int::numtraits::impls!($BUint, $BUint, $BInt, $Digit); macro_rules! from_float { ($method: ident, $float: ty, $decoder: ident, $mant_bits: ident) => { @@ -153,6 +153,7 @@ macro_rules! numtraits { } } + // TODO: replace this with code from the cast/float module from_float!(from_f32, f32, decode_f32, u32_bits); from_float!(from_f64, f64, decode_f64, u64_bits); } diff --git a/src/buint/overflowing.rs b/src/buint/overflowing.rs index a2f8bf9..967537f 100644 --- a/src/buint/overflowing.rs +++ b/src/buint/overflowing.rs @@ -47,41 +47,6 @@ macro_rules! 
overflowing { (out, borrow) } - #[inline] - const fn long_mul(self, rhs: Self) -> (Self, bool) { - let mut overflow = false; - let mut out = Self::ZERO; - let mut carry: $Digit; - - let mut i = 0; - while i < N { - carry = 0; - let mut j = 0; - while j < N { - let index = i + j; - if index < N { - let (prod, c) = digit::$Digit::carrying_mul( - self.digits[i], - rhs.digits[j], - carry, - out.digits[index], - ); - out.digits[index] = prod; - carry = c; - } else if self.digits[i] != 0 && rhs.digits[j] != 0 { - overflow = true; - break; - } - j += 1; - } - if carry != 0 { - overflow = true; - } - i += 1; - } - (out, overflow) - } - #[doc = doc::overflowing::overflowing_mul!(U)] #[must_use = doc::must_use_op!()] #[inline] diff --git a/src/cast/float/float_from_uint.rs b/src/cast/float/float_from_uint.rs new file mode 100644 index 0000000..51e6575 --- /dev/null +++ b/src/cast/float/float_from_uint.rs @@ -0,0 +1,73 @@ +use crate::helpers::{Bits, One}; +use crate::cast::CastFrom; +use crate::ExpType; +use super::FloatCastHelper; +use core::ops::{Shr, Add}; + +pub trait CastFloatFromUintHelper: Bits + Shr { + fn trailing_zeros(self) -> ExpType; +} + +macro_rules! impl_cast_float_from_uint_helper_for_primitive_uint { + ($($uint: ty), *) => { + $( + impl CastFloatFromUintHelper for $uint { + #[inline] + fn trailing_zeros(self) -> ExpType { + Self::trailing_zeros(self) as ExpType + } + } + )* + }; +} + +impl_cast_float_from_uint_helper_for_primitive_uint!(u8, u16, u32, u64, u128, usize); + +pub fn cast_float_from_uint(value: U) -> F +where + F: FloatCastHelper, + F::SignedExp: TryFrom + One + Add, + F::Mantissa: CastFrom + One, + U: CastFloatFromUintHelper + Copy +{ + let bit_width = value.bits(); // number of bits needed to specify value = exponent of largest power of two smaller than value. so bit_width will be one less than the exponent of the float + // let mant = if F::M::BITS < U::BITS { + + // } + if bit_width == 0 { + // in this case, value is zero + return F::ZERO; + } + let exponent = bit_width - 1; // value lies in range [2^(bit_width - 1), 2^bit_width) + + match F::SignedExp::try_from(exponent) { + Ok(mut exponent) => { + if exponent >= F::MAX_EXP { + // exponent is too large + return F::INFINITY; + } + let mantissa = if bit_width <= F::MANTISSA_DIGITS { + // in this case, we can safely cast which preserves the value (no truncation) + F::Mantissa::cast_from(value) << (F::MANTISSA_DIGITS - bit_width) + } else { + // note: we know that exponent >= F::MANTISSA_DIGITS, so only way + // TODO: we could generalise the round_mantissa_exponent code so that this could just be a call of round_mantissa_exponent instead + let shift = bit_width - F::MANTISSA_DIGITS; + let gte_half = value.bit(shift - 1); // is the discarded part greater than or equal to a half? 
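+                // The rounding below implements IEEE round-half-to-even on the discarded low bits:
+                // round up when the discarded part is strictly greater than half an ULP, or when it is
+                // exactly half (bit `shift - 1` set and no lower bits set) and the retained mantissa
+                // would be odd. For example, with a 4-bit mantissa, 0b11011 lies exactly halfway
+                // between 0b1101 and 0b1110 and so rounds to the even candidate 0b1110.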
+ let mut shifted_mantissa = F::Mantissa::cast_from(value >> shift); + if gte_half && (shifted_mantissa.bit(0) || value.trailing_zeros() != shift - 1) { + // by ties-to-even rule, round up + shifted_mantissa = shifted_mantissa + F::Mantissa::ONE; + if shifted_mantissa.bit(F::MANTISSA_DIGITS) { + // adding one overflowed to greater than mantissa width, so increment exponent and renormalised mantissa + shifted_mantissa = shifted_mantissa >> 1; + exponent = exponent + F::SignedExp::ONE; + } + } + shifted_mantissa + }; + F::from_signed_parts(false, exponent, mantissa) + }, + _ => F::INFINITY, // in this case, the exponent doesn't even fit into the float signed exponent, so is too big to be stored by F + } +} \ No newline at end of file diff --git a/src/cast/float/mod.rs b/src/cast/float/mod.rs new file mode 100644 index 0000000..63a3279 --- /dev/null +++ b/src/cast/float/mod.rs @@ -0,0 +1,244 @@ +use crate::ExpType; +use core::ops::{Add, Shl, Shr, BitAnd, Neg}; +use crate::helpers::Bits; + +mod uint_from_float; +mod float_from_uint; +pub use uint_from_float::*; +pub use float_from_uint::*; + +pub trait FloatMantissa: Sized + Shl + Shr + Add + BitAnd + PartialEq + Bits { + const ZERO: Self; + const ONE: Self; + const TWO: Self; + const MAX: Self; + + fn leading_zeros(self) -> ExpType; + fn checked_shr(self, n: ExpType) -> Option; + fn is_power_of_two(self) -> bool; +} + +macro_rules! impl_float_mantissa_for_uint { + ($($uint: ty), *) => { + $( + crate::nightly::const_impl! { + impl const FloatMantissa for $uint { + const ZERO: Self = 0; + const ONE: Self = 1; + const TWO: Self = 2; + const MAX: Self = Self::MAX; + + #[inline] + fn leading_zeros(self) -> ExpType { + Self::leading_zeros(self) as ExpType + } + + #[inline] + fn checked_shr(self, n: ExpType) -> Option { + Self::checked_shr(self, n as u32) + } + + #[inline] + fn is_power_of_two(self) -> bool { + Self::is_power_of_two(self) + } + } + } + )* + }; +} + +impl_float_mantissa_for_uint!(u32, u64); + +pub trait ConvertFloatParts { + type Mantissa: FloatMantissa; + type UnsignedExp; + type SignedExp: PartialEq + PartialOrd; + + fn into_raw_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa); + fn into_biased_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa); + fn into_signed_biased_parts(self) -> (bool, Self::SignedExp, Self::Mantissa); + fn into_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa); + fn into_normalised_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa); + + fn from_raw_parts(sign: bool, exponent: Self::UnsignedExp, mantissa: Self::Mantissa) -> Self; + fn from_biased_parts(sign: bool, exponent: Self::UnsignedExp, mantissa: Self::Mantissa) -> Self; + fn from_signed_biased_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self; + fn from_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self; + fn round_exponent_mantissa(exponent: Self::SignedExp, mantissa: Self::Mantissa, shift: ExpType) -> (Self::SignedExp, Self::Mantissa); + fn from_normalised_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self; +} + +macro_rules! 
impl_convert_float_parts_for_primitive_float { + ($float_type: ty, $unsigned_exponent_type: ty, $signed_exponent_type: ty, $mantissa_type: ty, $float_bit_width: literal) => { + impl ConvertFloatParts for $float_type { + type Mantissa = $mantissa_type; + type UnsignedExp = $unsigned_exponent_type; + type SignedExp = $signed_exponent_type; + + #[inline] + fn into_raw_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa) { + let sign = self.is_sign_negative(); + const SIGN_MASK: $mantissa_type = <$mantissa_type>::MAX >> 1; + let exp = (self.to_bits() & SIGN_MASK) >> (<$float_type>::MANTISSA_DIGITS - 1); + let mant = self.to_bits() & (<$mantissa_type>::MAX >> (<$mantissa_type>::BITS - (<$float_type>::MANTISSA_DIGITS - 1))); + + (sign, exp as _, mant) + } + + #[inline] + fn into_biased_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa) { + let (sign, exp, mant) = self.into_raw_parts(); + if exp == 0 { + (sign, 1, mant) + } else { + (sign, exp, mant | (1 << (<$float_type>::MANTISSA_DIGITS - 1))) + } + } + + #[inline] + fn into_signed_biased_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + let (sign, exp, mant) = self.into_biased_parts(); + (sign, exp as Self::SignedExp, mant) + } + + #[inline] + fn into_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + let (sign, exp, mant) = self.into_signed_biased_parts(); + const EXP_BIAS: $signed_exponent_type = <$float_type>::MAX_EXP - 1; + (sign, exp - EXP_BIAS, mant) + } + + #[inline] + fn into_normalised_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + let (sign, exp, mant) = self.into_signed_parts(); + use crate::helpers::Bits; + let shift = Self::MANTISSA_DIGITS - mant.bits(); + if mant == 0 || shift == 0 { + (sign, exp, mant) + } else { + let normalised_mant = mant >> shift; + let normalised_exp = exp - (shift as Self::SignedExp); + + (sign, normalised_exp, normalised_mant) + } + } + + #[inline] + fn from_raw_parts(sign: bool, exponent: Self::UnsignedExp, mantissa: Self::Mantissa) -> Self { + debug_assert!(mantissa.bits() <= <$float_type>::MANTISSA_DIGITS - 1); + + let mut bits = (exponent as $mantissa_type) << (<$float_type>::MANTISSA_DIGITS - 1) | mantissa; + if sign { + bits |= 1 << ($float_bit_width - 1); + } + Self::from_bits(bits) + } + + #[inline] + fn from_biased_parts(sign: bool, mut exponent: Self::UnsignedExp, mut mantissa: Self::Mantissa) -> Self { + debug_assert!(exponent != 0); + + if mantissa.bit(Self::MANTISSA_DIGITS - 1) { + mantissa ^= 1 << (Self::MANTISSA_DIGITS - 1); + } else { + debug_assert!(exponent == 1); + exponent = 0; + } + Self::from_raw_parts(sign, exponent, mantissa) + } + + #[inline] + fn from_signed_biased_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + debug_assert!(!exponent.is_negative()); + let exponent = exponent as Self::UnsignedExp; + Self::from_biased_parts(sign, exponent, mantissa) + } + + #[inline] + fn from_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + const EXP_BIAS: <$float_type as ConvertFloatParts>::SignedExp = <$float_type>::MAX_EXP - 1; + let exponent = exponent + EXP_BIAS; + Self::from_signed_biased_parts(sign, exponent, mantissa) + } + + #[inline] + fn round_exponent_mantissa(mut exponent: Self::SignedExp, mantissa: Self::Mantissa, shift: ExpType) -> (Self::SignedExp, Self::Mantissa) { + let mut shifted_mantissa = mantissa >> shift; + if !TIES_EVEN { + return (exponent, shifted_mantissa); // if not TIES_EVEN, then we truncate + } + let discarded_shifted_bits = mantissa & 
(<$mantissa_type>::MAX >> ($float_bit_width - shift)); + if discarded_shifted_bits.bit(shift - 1) { // in this case, the discarded portion is at least a half + if shifted_mantissa & 1 == 1 || !discarded_shifted_bits.is_power_of_two() { // in this case, ties to even says we round up. checking if not a power of two tells us that there is at least one bit set to 1 (after the most significant bit set to 1). we check in this order as is_odd is O(1) whereas is_power_of_two is O(N) + shifted_mantissa = shifted_mantissa + 1; + if shifted_mantissa.bit(shift) { // check for overflow (with respect to the mantissa bit width) + exponent += 1; + shifted_mantissa = shifted_mantissa >> 1; + } + } + } + (exponent, shifted_mantissa) + } + + #[inline] + fn from_normalised_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + use crate::helpers::Bits; + + debug_assert!(mantissa == 0 || mantissa.bits() == Self::MANTISSA_DIGITS); + if exponent < Self::MIN_EXP - 1 { + let shift = (Self::MIN_EXP - 1 - exponent) as ExpType; + let (out_exponent, out_mantissa) = Self::round_exponent_mantissa::(Self::MIN_EXP - 1, mantissa, shift); + + Self::from_signed_parts(sign, out_exponent, out_mantissa) + } else { + Self::from_signed_parts(sign, exponent, mantissa) + } + } + } + }; +} + +impl_convert_float_parts_for_primitive_float!(f32, u32, i32, u32, 32); +impl_convert_float_parts_for_primitive_float!(f64, u32, i32, u64, 64); + +pub trait FloatCastHelper: Neg + ConvertFloatParts + PartialEq { + const BITS: ExpType; + const MANTISSA_DIGITS: ExpType; + const EXPONENT_BITS: ExpType = Self::BITS - Self::MANTISSA_DIGITS; + const MAX_EXP: ::SignedExp; + const MIN_SUBNORMAL_EXP: ::SignedExp; + const INFINITY: Self; + const ZERO: Self; + const NEG_ZERO: Self; + + fn is_nan(&self) -> bool; + fn is_infinite(&self) -> bool; +} + +macro_rules! impl_cast_float_from_float_helper_for_primitive_float { + ($float_type: ty, $mantissa_type: ty, $exponent_type: ty, $float_type_bit_width: literal) => { + impl FloatCastHelper for $float_type { + const BITS: ExpType = $float_type_bit_width; + const MANTISSA_DIGITS: ExpType = Self::MANTISSA_DIGITS as ExpType; + const MAX_EXP: ::SignedExp = Self::MAX_EXP; + const MIN_SUBNORMAL_EXP: ::SignedExp = Self::MIN_EXP + 1 - Self::MANTISSA_DIGITS as ::SignedExp; + const INFINITY: Self = Self::INFINITY; + const ZERO: Self = 0.0; + const NEG_ZERO: Self = -0.0; + + #[inline] + fn is_nan(&self) -> bool { + Self::is_nan(*self) + } + + #[inline] + fn is_infinite(&self) -> bool { + Self::is_infinite(*self) + } + } + } +} + +impl_cast_float_from_float_helper_for_primitive_float!(f32, u32, i32, 32); +impl_cast_float_from_float_helper_for_primitive_float!(f64, u64, i32, 64); diff --git a/src/cast/float/uint_from_float.rs b/src/cast/float/uint_from_float.rs new file mode 100644 index 0000000..215907b --- /dev/null +++ b/src/cast/float/uint_from_float.rs @@ -0,0 +1,81 @@ +use core::ops::{Neg, Shl}; +use crate::cast::CastFrom; +use crate::ExpType; +use super::FloatMantissa; +use super::FloatCastHelper; +use crate::helpers::{Bits, One, Zero}; + +pub trait CastUintFromFloatHelper: Zero + One + Bits { + const MAX: Self; + const MIN: Self; +} + +macro_rules! 
impl_cast_uint_from_float_helper_for_primitive_uint { + ($($uint: ident), *) => { + $( + impl CastUintFromFloatHelper for $uint { + const MAX: Self = Self::MAX; + const MIN: Self = Self::MIN; + } + )* + }; + } + +impl_cast_uint_from_float_helper_for_primitive_uint!(u8, u16, u32, u64, u128, usize); + +pub fn cast_uint_from_float(value: F) -> U +where + F: FloatCastHelper + core::fmt::Debug, + F::Mantissa: Bits, + ExpType: TryFrom, + U: CastUintFromFloatHelper + + CastFrom + + Shl + + core::fmt::Debug, + F::SignedExp: One + Neg, +{ + if value.is_nan() { + return U::ZERO; + } + let is_infinite = value.is_infinite(); // store this value first, as then we check if value is infinite after checking if it is negative, as we don't want to return MAX for negative infinity + let (sign, exp, mant) = value.into_normalised_signed_parts(); + if sign { + return U::MIN; + } + if is_infinite { + return U::MAX; + } + if mant == F::Mantissa::ZERO { + return U::ZERO; + } + if exp < -F::SignedExp::ONE { + // in this case, the value is at most a half, so we round (ties to even) to zero + return U::ZERO; + } + if exp == -F::SignedExp::ONE { + // exponent is -1, so value is in range [1/2, 1) + if mant.is_power_of_two() { + // in this case, the value is exactly 1/2, so we round (ties to even) to zero + return U::ZERO; + } + return U::ONE; + } + // now we know that the exponent is non-negative so can shift + // As per Rust's numeric casting semantics (https://doc.rust-lang.org/reference/expressions/operator-expr.html#numeric-cast), casting a float to an integer truncates rather than using ties to even + + match ExpType::try_from(exp) { + Ok(exp) => { + if exp >= U::BITS { + return U::MAX; + } + let mant_bit_width = mant.bits(); + if exp <= mant_bit_width - 1 { + // in this case, we have a fractional part to truncate + U::cast_from(mant >> (mant_bit_width - 1 - exp)) // the right shift means the mantissa now has exp + 1 bits, and as we must have exp < U::BITS, the shifted mantissa is no wider than U + } else { + U::cast_from(mant) << (exp - (mant_bit_width - 1)) + } + } + _ => U::MAX, + } +} diff --git a/src/cast.rs b/src/cast/mod.rs similarity index 97% rename from src/cast.rs rename to src/cast/mod.rs index 74b9a46..23fc44d 100644 --- a/src/cast.rs +++ b/src/cast/mod.rs @@ -1,13 +1,10 @@ //! Panic-free casting between numeric types. /// Backend implementation trait for panic-free casting between numeric types. - -// #[cfg_attr(feature = "nightly", const_trait)] pub trait CastFrom { fn cast_from(from: T) -> Self; } -// #[cfg_attr(feature = "nightly", const_trait)] #[cfg(test)] pub(crate) trait CastTo { fn cast_to(self) -> U; @@ -154,3 +151,5 @@ primitive_cast_impl!(bool as [u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, primitive_cast_impl!(char as [u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, char]); primitive_cast_impl!(u8 as [u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64, char]); multiple_impls!(u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64); + +pub mod float; \ No newline at end of file diff --git a/src/doc/bigint_helpers.rs b/src/doc/bigint_helpers.rs index c979f61..8190f22 100644 --- a/src/doc/bigint_helpers.rs +++ b/src/doc/bigint_helpers.rs @@ -8,4 +8,4 @@ macro_rules! 
impl_desc { pub(crate) use impl_desc; -doc::link_doc_comment!(carrying_add, borrowing_sub, widening_mul, carrying_mul); +doc::link_doc_comment_method!(carrying_add, borrowing_sub, widening_mul, carrying_mul); diff --git a/src/doc/checked.rs b/src/doc/checked.rs index d4e85ae..9ac6649 100644 --- a/src/doc/checked.rs +++ b/src/doc/checked.rs @@ -12,7 +12,7 @@ macro_rules! impl_desc { pub(crate) use impl_desc; -doc::link_doc_comment!( +doc::link_doc_comment_method!( checked_abs, checked_add, checked_add_signed, @@ -37,7 +37,7 @@ doc::link_doc_comment!( macro_rules! checked_next_power_of_two { ($sign: ident $bits: literal) => { doc::doc_comment! { - #checked_next_power_of_two, + #method.checked_next_power_of_two, $sign $bits, "Returns the smallest power of two greater than or equal to `self`. If the next power of two is greater than `Self::MAX`, `None` is returned, otherwise the power of two is wrapped in `Some`.", diff --git a/src/doc/classify.rs b/src/doc/classify.rs new file mode 100644 index 0000000..756a03f --- /dev/null +++ b/src/doc/classify.rs @@ -0,0 +1,18 @@ +macro_rules! impl_desc { + () => { + "Classification methods: used to determine properties about the storage of the number." + }; +} + +pub(crate) use impl_desc; + +crate::doc::link_doc_comment_method!( + is_sign_positive, + is_sign_negative, + is_finite, + is_infinite, + is_nan, + is_subnormal, + is_normal, + classify +); \ No newline at end of file diff --git a/src/doc/cmp.rs b/src/doc/cmp.rs new file mode 100644 index 0000000..62bd71d --- /dev/null +++ b/src/doc/cmp.rs @@ -0,0 +1,16 @@ +macro_rules! impl_desc { + () => { + "Comparison methods." + }; +} + +pub(crate) use impl_desc; + +crate::doc::link_doc_comment_method!( + max, + min, + maximum, + minimum, + clamp, + total_cmp +); \ No newline at end of file diff --git a/src/doc/consts.rs b/src/doc/consts.rs index 319ab3c..86ab88f 100644 --- a/src/doc/consts.rs +++ b/src/doc/consts.rs @@ -91,3 +91,20 @@ macro_rules! value_desc { } pub(crate) use value_desc; + +crate::doc::link_doc_comment_constant!( + RADIX, + MANTISSA_DIGITS, + DIGITS, + EPSILON, + MIN, + MIN_POSITIVE, + MAX, + MIN_EXP, + MAX_EXP, + // MIN_10_EXP, + // MAX_10_EXP, + NAN, + INFINITY, + NEG_INFINITY +); \ No newline at end of file diff --git a/src/doc/endian.rs b/src/doc/endian.rs index 0f9b06f..cf6fe0b 100644 --- a/src/doc/endian.rs +++ b/src/doc/endian.rs @@ -13,7 +13,7 @@ pub(crate) use impl_desc; macro_rules! from_be { ($sign: ident $bits: literal) => { doc::doc_comment! { - #from_be, + #method.from_be, $sign $bits, "Converts an integer from big endian to the target’s endianness." "On big endian this is a no-op. On little endian the bytes are swapped." @@ -26,7 +26,7 @@ pub(crate) use from_be; macro_rules! from_le { ($sign: ident $bits: literal) => { doc::doc_comment! { - #from_le, + #method.from_le, $sign $bits, "Converts an integer from little endian to the target’s endianness." "On little endian this is a no-op. On big endian the bytes are swapped." @@ -39,7 +39,7 @@ pub(crate) use from_le; macro_rules! to_be { ($sign: ident $bits: literal) => { doc::doc_comment! { - #to_be, + #method.to_be, $sign $bits, "Converts `self` from big endian to the target’s endianness." "On big endian this is a no-op. On little endian the bytes are swapped." @@ -52,7 +52,7 @@ pub(crate) use to_be; macro_rules! to_le { ($sign: ident $bits: literal) => { doc::doc_comment! { - #to_le, + #method.to_le, $sign $bits, "Converts `self` from little endian to the target’s endianness." "On little endian this is a no-op. 
On big endian the bytes are swapped." @@ -63,7 +63,7 @@ macro_rules! to_le { pub(crate) use to_le; #[cfg(feature = "nightly")] -crate::doc::link_doc_comment! { +crate::doc::link_doc_comment_method! { to_be_bytes, to_le_bytes, to_ne_bytes, diff --git a/src/doc/math.rs b/src/doc/math.rs new file mode 100644 index 0000000..8e60244 --- /dev/null +++ b/src/doc/math.rs @@ -0,0 +1,15 @@ +macro_rules! impl_desc { + () => { + "Bigint helper methods: common functions used to implement big integer arithmetic." + }; +} + +pub(crate) use impl_desc; + +crate::doc::link_doc_comment_method!( + abs, + sqrt, + div_euclid, + rem_euclid, + powi +); \ No newline at end of file diff --git a/src/doc/mod.rs b/src/doc/mod.rs index 9ab2334..e0fbb31 100644 --- a/src/doc/mod.rs +++ b/src/doc/mod.rs @@ -1,10 +1,14 @@ pub mod bigint_helpers; pub mod checked; +pub mod classify; +pub mod cmp; pub mod const_trait_fillers; pub mod consts; pub mod endian; +pub mod math; pub mod overflowing; pub mod radix; +pub mod rounding; pub mod saturating; pub mod strict; pub mod unchecked; @@ -22,6 +26,12 @@ macro_rules! must_use_op { () => { "this returns the result of the operation, without modifying the original" }; + (float) => { + "method returns a new number and does not mutate the original value" + }; + (comparison) => { + "this returns the result of the comparison, without modifying either input" + }; } pub(crate) use must_use_op; @@ -88,15 +98,18 @@ macro_rules! small_sign { (I) => { "i" }; + (F) => { + "f" + }; } pub(crate) use small_sign; macro_rules! doc_comment { - { $(# $method: ident, )? $sign: ident $bits: literal, $($($desc: expr)+)? $(, $($code: expr)+)? } => { + { $(# $item_type: ident . $method: ident, )? $sign: ident $bits: literal, $($($desc: expr)+)? $(, $($code: expr)+)? } => { concat!( $($("\n\n", $desc), +,)? - $("\n\n", "See also: .", )? + $("\n\n", "See also: .", )? $( doc::example_header!($sign $bits), $($code), +, @@ -106,13 +119,32 @@ macro_rules! doc_comment { } } -macro_rules! link_doc_comment { +macro_rules! link_doc_comment_method { + ($($name: ident), *) => { + $( + macro_rules! $name { + ($sign: ident) => { + doc::doc_comment! { + #method.$name, + $sign 256, + } + }; + } + + pub(crate) use $name; + )* + } +} + +pub(crate) use link_doc_comment_method; + +macro_rules! link_doc_comment_constant { ($($name: ident), *) => { $( macro_rules! $name { ($sign: ident) => { doc::doc_comment! { - #$name, + #associatedconstant.$name, $sign 256, } }; @@ -123,14 +155,14 @@ macro_rules! link_doc_comment { } } -pub(crate) use link_doc_comment; +pub(crate) use link_doc_comment_constant; pub(crate) use doc_comment; macro_rules! count_ones { ($sign: ident $bits: literal) => { doc::doc_comment! { - #count_ones, + #method.count_ones, $sign $bits, "Returns the number of ones in the binary representation of `self`.", @@ -145,7 +177,7 @@ pub(crate) use count_ones; macro_rules! count_zeros { ($sign: ident $bits: literal) => { doc::doc_comment! { - #count_zeros, + #method.count_zeros, $sign $bits, "Returns the number of zeros in the binary representation of `self`.", @@ -159,7 +191,7 @@ pub(crate) use count_zeros; macro_rules! leading_zeros { ($sign: ident $bits: literal) => { doc::doc_comment! { - #leading_zeros, + #method.leading_zeros, $sign $bits, "Returns the number of leading zeros in the binary representation of `self`.", @@ -174,7 +206,7 @@ pub(crate) use leading_zeros; macro_rules! trailing_zeros { ($sign: ident $bits: literal) => { doc::doc_comment! 
{ - #trailing_zeros, + #method.trailing_zeros, $sign $bits, "Returns the number of trailing zeros in the binary representation of `self`.", @@ -189,7 +221,7 @@ pub(crate) use trailing_zeros; macro_rules! leading_ones { ($sign: ident $bits: literal, $c: ident) => { doc::doc_comment! { - #leading_ones, + #method.leading_ones, $sign $bits, "Returns the number of leading ones in the binary representation of `self`.", @@ -204,7 +236,7 @@ pub(crate) use leading_ones; macro_rules! trailing_ones { ($sign: ident $bits: literal) => { doc::doc_comment! { - #trailing_ones, + #method.trailing_ones, $sign $bits, "Returns the number of trailing ones in the binary representation of `self`.", @@ -219,7 +251,7 @@ pub(crate) use trailing_ones; macro_rules! rotate_left { ($sign: ident $bits: literal, $u: literal) => { doc::doc_comment! { - #rotate_left, + #method.rotate_left, $sign $bits, "Shifts the bits to the left by a specified amount, `n`, wrapping the truncated bits to the end of the resulting integer." "Please note this isn't the same operation as the `<<` shifting operator!" @@ -232,7 +264,7 @@ pub(crate) use rotate_left; macro_rules! rotate_right { ($sign: ident $bits: literal, $u: literal) => { doc::doc_comment! { - #rotate_right, + #method.rotate_right, $sign $bits, "Shifts the bits to the left by a specified amount, `n`, wrapping the truncated bits to the end of the resulting integer." "Please note this isn't the same operation as the `>>` shifting operator!" @@ -246,7 +278,7 @@ pub(crate) use rotate_right; macro_rules! swap_bytes { ($sign: ident $bits: literal, $u: literal) => { doc::doc_comment! { - #swap_bytes, + #method.swap_bytes, $sign $bits, "Reverses the byte order of the integer.", @@ -261,7 +293,7 @@ pub(crate) use swap_bytes; macro_rules! reverse_bits { ($sign: ident $bits: literal, $u: literal) => { doc::doc_comment! { - #reverse_bits, + #method.reverse_bits, $sign $bits, "Reverses the order of bits in the integer. The least significant bit becomes the most significant bit, second least-significant bit becomes second most-significant bit, etc.", @@ -276,7 +308,7 @@ pub(crate) use reverse_bits; macro_rules! pow { ($sign: ident $bits: literal) => { doc::doc_comment! { - #pow, + #method.pow, $sign $bits, "Raises `self` to the power of `exp`, using exponentiation by squaring.", @@ -291,7 +323,7 @@ pub(crate) use pow; macro_rules! next_power_of_two { ($sign: ident $bits: literal, $wrap: literal, $small: literal) => { doc::doc_comment! { - #next_power_of_two, + #method.next_power_of_two, $sign $bits, concat!("When return value overflows, it panics in debug mode and the return value is wrapped to ", $wrap, " in release mode (the only situation in which method can return ", $wrap, ")."), @@ -390,7 +422,7 @@ macro_rules! is_one { pub(crate) use is_one; -crate::doc::link_doc_comment! { +crate::doc::link_doc_comment_method! { unsigned_abs, div_euclid, rem_euclid, @@ -407,5 +439,8 @@ crate::doc::link_doc_comment! { is_negative, cast_signed, cast_unsigned, - midpoint + midpoint, + copysign, + next_up, + next_down } diff --git a/src/doc/overflowing.rs b/src/doc/overflowing.rs index 6c90d46..54581f3 100644 --- a/src/doc/overflowing.rs +++ b/src/doc/overflowing.rs @@ -6,7 +6,7 @@ macro_rules! 
impl_desc { pub(crate) use impl_desc; -crate::doc::link_doc_comment!( +crate::doc::link_doc_comment_method!( overflowing_abs, overflowing_add, overflowing_add_signed, diff --git a/src/doc/rounding.rs b/src/doc/rounding.rs new file mode 100644 index 0000000..3fd4095 --- /dev/null +++ b/src/doc/rounding.rs @@ -0,0 +1,15 @@ +macro_rules! impl_desc { + () => { + "Rounding methods." + }; +} + +pub(crate) use impl_desc; + +crate::doc::link_doc_comment_method!( + round, + ceil, + floor, + trunc, + fract +); \ No newline at end of file diff --git a/src/doc/saturating.rs b/src/doc/saturating.rs index f16fc69..b62ea03 100644 --- a/src/doc/saturating.rs +++ b/src/doc/saturating.rs @@ -6,7 +6,7 @@ macro_rules! impl_desc { pub(crate) use impl_desc; -crate::doc::link_doc_comment!( +crate::doc::link_doc_comment_method!( saturating_abs, saturating_add, saturating_add_signed, diff --git a/src/doc/strict.rs b/src/doc/strict.rs index c306961..c9659f6 100644 --- a/src/doc/strict.rs +++ b/src/doc/strict.rs @@ -6,7 +6,7 @@ macro_rules! impl_desc { pub(crate) use impl_desc; -crate::doc::link_doc_comment!( +crate::doc::link_doc_comment_method!( strict_abs, strict_add, strict_add_signed, diff --git a/src/doc/unchecked.rs b/src/doc/unchecked.rs index 409cf1a..7f0a79d 100644 --- a/src/doc/unchecked.rs +++ b/src/doc/unchecked.rs @@ -6,7 +6,7 @@ macro_rules! impl_desc { pub(crate) use impl_desc; -crate::doc::link_doc_comment!( +crate::doc::link_doc_comment_method!( unchecked_add, unchecked_mul, unchecked_shl, diff --git a/src/doc/wrapping.rs b/src/doc/wrapping.rs index c8a80a5..11f4c9d 100644 --- a/src/doc/wrapping.rs +++ b/src/doc/wrapping.rs @@ -6,7 +6,7 @@ macro_rules! impl_desc { pub(crate) use impl_desc; -crate::doc::link_doc_comment!( +crate::doc::link_doc_comment_method!( wrapping_abs, wrapping_add, wrapping_add_signed, @@ -27,7 +27,7 @@ crate::doc::link_doc_comment!( macro_rules! wrapping_next_power_of_two { ($sign: ident $bits: literal) => { doc::doc_comment! { - #wrapping_next_power_of_two, + #method.wrapping_next_power_of_two, $sign $bits, concat!("Returns the smallest power of two greater than or equal to `self`. If the next power of two is greater than `Self::MAX`, the return value is wrapped to `Self::MIN`."), diff --git a/src/float/cast.rs b/src/float/cast.rs index 6f181b2..2733d2c 100644 --- a/src/float/cast.rs +++ b/src/float/cast.rs @@ -1,11 +1,10 @@ -// TODO: implement casts from and to float for primitive types and buint, bint -use super::Float; +use crate::cast::float::{FloatCastHelper, FloatMantissa}; + +use super::{Float, FloatExponent}; use crate::cast::CastFrom; use crate::doc; use crate::{BUintD8, BUintD16, BUintD32, BUint, BIntD8, BIntD16, BIntD32, BInt}; use crate::ExpType; -use crate::buint::as_float::{CastToFloatConsts, cast_float_from_uint}; -use crate::buint::as_float; macro_rules! uint_as_float { ($($uint: ident $(<$N: ident>)?), *) => { @@ -14,7 +13,7 @@ macro_rules! uint_as_float { #[must_use = doc::must_use_op!()] #[inline] fn cast_from(from: $uint $(<$N>)?) -> Self { - cast_float_from_uint(from) + crate::cast::float::cast_float_from_uint(from) } } )* @@ -101,118 +100,130 @@ macro_rules! float_as_int { float_as_int!(i8; u8, i16; u16, i32; u32, i64; u64, i128; u128, isize; usize); -macro_rules! 
impl_mantissa_for_buint { - ($BUint: ident, $BInt: ident, $Digit: ident) => { - impl as_float::Mantissa for $BUint { - const ONE: Self = Self::ONE; - const TWO: Self = Self::TWO; - const MAX: Self = Self::MAX; - const BITS: ExpType = Self::BITS; - - #[inline] - fn bit(&self, n: ExpType) -> bool { - Self::bit(&self, n) - } - - #[inline] - fn shl(self, n: ExpType) -> Self { - Self::shl(self, n) - } - - #[inline] - fn shr(self, n: ExpType) -> Self { - Self::shr(self, n) - } - - #[inline] - fn add(self, rhs: Self) -> Self { - Self::add(self, rhs) - } - - #[inline] - fn sub(self, rhs: Self) -> Self { - Self::sub(self, rhs) - } - - #[inline] - fn leading_zeros(self) -> ExpType { - Self::leading_zeros(self) - } - - #[inline] - fn bitand(self, rhs: Self) -> Self { - Self::bitand(self, rhs) - } - - #[inline] - fn gt(&self, rhs: &Self) -> bool { - Self::gt(&self, &rhs) +macro_rules! float_as_uint { + ($($uint: ident $(<$N: ident>)?), *) => { + $( + impl CastFrom> for $uint $(<$N>)? { + #[must_use = doc::must_use_op!()] + #[inline] + fn cast_from(value: Float) -> Self { + crate::cast::float::cast_uint_from_float(value) + // uint_cast_from_float(from) + } } - } + )* }; } -pub(crate) use impl_mantissa_for_buint; - -crate::macro_impl!(impl_mantissa_for_buint); - -use crate::buint::float_as::{uint_cast_from_float, CastUintFromFloatHelper}; +float_as_uint!(BUintD8, BUintD16, BUintD32, BUint, u8, u16, u32, u64, u128, usize); -impl CastUintFromFloatHelper for Float { - type M = BUintD8; - type E = BIntD8; +impl FloatCastHelper for Float { + const BITS: ExpType = Self::BITS; + const MANTISSA_DIGITS: ExpType = Self::MANTISSA_DIGITS as ExpType; + const MAX_EXP: FloatExponent = Self::MAX_EXP; + const MIN_SUBNORMAL_EXP: FloatExponent = Self::MIN_SUBNORMAL_EXP; + const INFINITY: Self = Self::INFINITY; + const ZERO: Self = Self::ZERO; + const NEG_ZERO: Self = Self::NEG_ZERO; #[inline] fn is_nan(&self) -> bool { Self::is_nan(*self) } - #[inline] - fn is_sign_negative(&self) -> bool { - Self::is_sign_negative(*self) - } - #[inline] fn is_infinite(&self) -> bool { Self::is_infinite(*self) } +} - #[inline] - fn decode(self) -> (Self::M, Self::E) { - let (_, exp, mant) = self.to_parts_biased(); - let exp = BIntD8::from_bits(exp) - Self::EXP_BIAS - BIntD8::cast_from(Self::MB); - (mant, exp) +fn cast_float_from_float(f: T) -> U +where + T: FloatCastHelper, + U: FloatCastHelper, + U::Mantissa: CastFrom, + U::SignedExp: CastFrom, + T::SignedExp: CastFrom +{ + // deal with zero cases as this means mantissa must have leading one + let (sign, mut exponent, mantissa) = f.into_normalised_signed_parts(); + if mantissa == T::Mantissa::ZERO { + return if sign { + U::NEG_ZERO + } else { + U::ZERO + }; } -} + if exponent == T::MAX_EXP { // the float is either infinity or NaN + let out_mantissa = if T::MANTISSA_DIGITS <= U::MANTISSA_DIGITS { + U::Mantissa::cast_from(mantissa) << (U::MANTISSA_DIGITS - T::MANTISSA_DIGITS) + } else { + U::Mantissa::cast_from(mantissa >> (T::MANTISSA_DIGITS - U::MANTISSA_DIGITS)) + }; + return U::from_normalised_signed_parts(sign, U::MAX_EXP, out_mantissa); + } + let out_mantissa = if T::MANTISSA_DIGITS <= U::MANTISSA_DIGITS { // in this case, the mantissa can be converted exactly + U::Mantissa::cast_from(mantissa) << (U::MANTISSA_DIGITS - T::MANTISSA_DIGITS) + } else { + let (e, m) = T::round_exponent_mantissa::(exponent, mantissa, T::MANTISSA_DIGITS - U::MANTISSA_DIGITS); + exponent = e; -impl CastToFloatConsts for Float { - type M = BUintD8; + U::Mantissa::cast_from(m) + }; - const ZERO: Self = 
Self::ZERO; - const MANTISSA_DIGITS: ExpType = Self::MANTISSA_DIGITS as ExpType; - const MAX_EXP: Self::M = Self::MAX_EXP.to_bits(); - const INFINITY: Self = Self::INFINITY; + let out_exponent = if T::EXPONENT_BITS <= U::EXPONENT_BITS { // in this case, we will never have overflow or underflow + U::SignedExp::cast_from(exponent) + } else { + if T::SignedExp::cast_from(U::MAX_EXP) <= exponent { // exponent is too large to fit into output exponent + return if sign { + -U::INFINITY + } else { + U::INFINITY + }; + } + if exponent < T::SignedExp::cast_from(U::MIN_SUBNORMAL_EXP) { + return if sign { + U::NEG_ZERO + } else { + U::ZERO + }; + } + U::SignedExp::cast_from(exponent) + }; + U::from_normalised_signed_parts(sign, out_exponent, out_mantissa) +} - fn from_raw_parts(exp: Self::M, mant: Self::M) -> Self { - Self::from_raw_parts(false, exp, mant & Self::MANTISSA_MASK) +impl CastFrom> for Float { + #[must_use = doc::must_use_op!()] + #[inline] + fn cast_from(from: Float) -> Self { + cast_float_from_float(from) } } -macro_rules! float_as_uint { - ($($uint: ident $(<$N: ident>)?), *) => { +macro_rules! primitive_and_big_float_cast { + ($($primitive_float_type: ty), *) => { $( - impl CastFrom> for $uint $(<$N>)? { + impl CastFrom<$primitive_float_type> for Float { + #[must_use = doc::must_use_op!()] + #[inline] + fn cast_from(from: $primitive_float_type) -> Self { + cast_float_from_float(from) + } + } + + impl CastFrom> for $primitive_float_type { #[must_use = doc::must_use_op!()] #[inline] fn cast_from(from: Float) -> Self { - uint_cast_from_float(from) + cast_float_from_float(from) } } )* }; } -float_as_uint!(BUintD8, BUintD16, BUintD32, BUint, u8, u16, u32, u64, u128, usize); +primitive_and_big_float_cast!(f32, f64); #[cfg(test)] mod tests { @@ -222,23 +233,27 @@ mod tests { use crate::test::types::{ftest, FTEST}; use crate::test::cast_types::*; - #[test] - fn test_cast() { - let a = 1234034598347589374u128; - let b = FTEST::cast_from(a); - let c = ftest::cast_from(a); - assert_eq!(b, c.into()); - } - test_from! { function: ::cast_from, - from_types: (u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, UTESTD8, UTESTD16, UTESTD32, UTESTD64, TestUint1, TestUint2, TestUint3, TestUint4, TestUint5, TestUint6, TestUint7, TestUint8, TestUint9, TestUint10, ITESTD8, ITESTD16, ITESTD32, ITESTD64, TestInt1, TestInt2, TestInt3, TestInt4, TestInt5, TestInt6, TestInt7, TestInt8) + from_types: (u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64, UTESTD8, UTESTD16, UTESTD32, UTESTD64, TestUint1, TestUint2, TestUint3, TestUint4, TestUint5, TestUint6, TestUint7, TestUint8, TestUint9, TestUint10, ITESTD8, ITESTD16, ITESTD32, ITESTD64, TestInt1, TestInt2, TestInt3, TestInt4, TestInt5, TestInt6, TestInt7, TestInt8, TestInt9, TestInt10) } test_into! 
{ function: ::cast_to, - into_types: (u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize) + into_types: (u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64) + } + + #[test] + fn test_cast_float() { + use crate::cast::As; + let f1 = FTEST::from_bits(3472883712u32.as_()); + let f2 = f32::from_bits(3472883712u32); + dbg!(f2); + let u1 = u32::cast_from(f1); + let u2 = u32::cast_from(f2); + println!("{:?}", u1); + println!("{:?}", u2); } - crate::int::cast::test_cast_to_bigint!(ftest; UTESTD8, UTESTD16, UTESTD32, UTESTD64, TestUint1, TestUint2, TestUint3, TestUint4, TestUint5, TestUint6, TestUint7, TestUint8, ITESTD8, ITESTD16, ITESTD32, ITESTD64, TestInt1, TestInt2, TestInt3, TestInt4, TestInt5, TestInt6, TestInt7, TestInt8); + // crate::int::cast::test_cast_to_bigint!(ftest; UTESTD8, UTESTD16, UTESTD32, UTESTD64, TestUint1, TestUint2, TestUint3, TestUint4, TestUint5, TestUint6, TestUint7, TestUint8, ITESTD8, ITESTD16, ITESTD32, ITESTD64, TestInt1, TestInt2, TestInt3, TestInt4, TestInt5, TestInt6, TestInt7, TestInt8); } diff --git a/src/float/classify.rs b/src/float/classify.rs index c572dd9..0865bdf 100644 --- a/src/float/classify.rs +++ b/src/float/classify.rs @@ -1,27 +1,32 @@ use super::Float; use crate::BUintD8; use core::num::FpCategory; +use crate::doc; type Digit = u8; struct Masks; impl Masks { - //const Q_NAN_MASK: BUintD8 = Float::::NAN.to_bits(); const FINITE_MASK: BUintD8 = Float::::INFINITY.to_bits(); } +#[doc = doc::classify::impl_desc!()] impl Float { - #[inline] + #[doc = doc::classify::is_sign_positive!(F)] + #[inline(always)] pub const fn is_sign_positive(self) -> bool { !self.is_sign_negative() } - #[inline] + #[doc = doc::classify::is_sign_negative!(F)] + #[inline(always)] pub const fn is_sign_negative(self) -> bool { - self.to_int().is_negative() + const A: Digit = (1 as Digit).rotate_right(1); + self.bits.digits[W - 1] >= A } + #[doc = doc::classify::is_finite!(F)] #[inline] pub const fn is_finite(self) -> bool { self.to_bits() @@ -29,44 +34,26 @@ impl Float { .ne(&Masks::::FINITE_MASK) } + #[doc = doc::classify::is_infinite!(F)] #[inline] pub const fn is_infinite(self) -> bool { self.abs().to_bits().eq(&Masks::::FINITE_MASK) - /*let bits = self.abs().to_bits(); - bits.trailing_zeros() == Self::MB && bits.count_ones() == Self::EXPONENT_BITS as ExpType*/ } - /*#[inline] - pub const fn is_quiet_indefinite_nan(self) -> bool { - self == Self::NAN - }*/ - //} - + #[doc = doc::classify::is_nan!(F)] #[inline] pub const fn is_nan(self) -> bool { - //!(self.mantissa().is_zero() || self.is_finite()) !self.is_finite() && self.to_bits().trailing_zeros() < Self::MB } - //crate::nightly::const_fns! 
{ - /*#[inline] - pub const fn is_quiet_nan(self) -> bool { - self.to_bits() & Masks::::Q_NAN_MASK == Masks::::Q_NAN_MASK - } - - #[inline] - pub const fn is_signalling_nan(self) -> bool { - self.to_bits() & Masks::::Q_NAN_MASK == Self::INFINITY.to_bits() - }*/ - //} - + #[doc = doc::classify::is_subnormal!(F)] #[inline] pub const fn is_subnormal(self) -> bool { - /*!self.is_zero() && self.exponent().is_zero()*/ let lz = self.abs().to_bits().leading_zeros(); lz < Self::BITS && lz > Self::EXPONENT_BITS } + #[doc = doc::classify::is_normal!(F)] #[inline] pub const fn is_normal(self) -> bool { matches!(self.classify(), FpCategory::Normal) @@ -74,34 +61,34 @@ impl Float { #[inline] pub const fn is_zero(&self) -> bool { + let words = self.words(); let mut i = 0; while i < W - 1 { - if self.words()[i] != 0 { + if words[i] != 0 { return false; } i += 1; } - let last = self.words()[W - 1]; + let last = words[W - 1]; last.trailing_zeros() >= Digit::BITS - 1 } - crate::nightly::const_fn! { - #[inline] - pub const fn classify(self) -> FpCategory { - let u = self.abs().to_bits(); + #[doc = doc::classify::classify!(F)] + #[inline] + pub const fn classify(self) -> FpCategory { + let u = self.abs().to_bits(); + if u.is_zero() { + FpCategory::Zero + } else if u.eq(&Self::INFINITY.to_bits()) { + FpCategory::Infinite + } else { + let u = u.bitand(Masks::::FINITE_MASK); if u.is_zero() { - FpCategory::Zero - } else if u.eq(&Self::INFINITY.to_bits()) { - FpCategory::Infinite + FpCategory::Subnormal + } else if u.eq(&Masks::::FINITE_MASK) { + FpCategory::Nan } else { - let u = u.bitand(Masks::::FINITE_MASK); - if u.is_zero() { - FpCategory::Subnormal - } else if u.eq(&Masks::::FINITE_MASK) { - FpCategory::Nan - } else { - FpCategory::Normal - } + FpCategory::Normal } } } diff --git a/src/float/cmp.rs b/src/float/cmp.rs index 669dfc7..a2d0abc 100644 --- a/src/float/cmp.rs +++ b/src/float/cmp.rs @@ -1,32 +1,38 @@ use super::Float; -use crate::{BIntD8, BUintD8}; +use crate::BIntD8; +use crate::doc; use core::cmp::{Ordering, PartialEq, PartialOrd}; +#[doc = doc::cmp::impl_desc!()] impl Float { - crate::nightly::const_fns! { - #[inline] - pub const fn max(self, other: Self) -> Self { - handle_nan!(other; self); - handle_nan!(self; other); - if let Ordering::Less = self.total_cmp(&other) { - other - } else { - self - } + #[doc = doc::cmp::max!(F)] + #[must_use = doc::must_use_op!(comparison)] + #[inline] + pub const fn max(self, other: Self) -> Self { + handle_nan!(other; self); + handle_nan!(self; other); + if let Ordering::Less = self.total_cmp(&other) { + other + } else { + self } + } - #[inline] - pub const fn min(self, other: Self) -> Self { - handle_nan!(other; self); - handle_nan!(self; other); - if let Ordering::Greater = self.total_cmp(&other) { - other - } else { - self - } + #[doc = doc::cmp::min!(F)] + #[must_use = doc::must_use_op!(comparison)] + #[inline] + pub const fn min(self, other: Self) -> Self { + handle_nan!(other; self); + handle_nan!(self; other); + if let Ordering::Greater = self.total_cmp(&other) { + other + } else { + self } } + #[doc = doc::cmp::maximum!(F)] + #[must_use = doc::must_use_op!(comparison)] #[inline] pub const fn maximum(self, other: Self) -> Self { handle_nan!(self; self); @@ -38,6 +44,8 @@ impl Float { } } + #[doc = doc::cmp::minimum!(F)] + #[must_use = doc::must_use_op!(comparison)] #[inline] pub const fn minimum(self, other: Self) -> Self { handle_nan!(self; self); @@ -49,25 +57,27 @@ impl Float { } } - //crate::nightly::const_fns! 
{ + #[doc = doc::cmp::clamp!(F)] + #[must_use = doc::must_use_op!(float)] #[inline] - pub fn clamp(self, min: Self, max: Self) -> Self { - assert!(min <= max); + pub const fn clamp(self, min: Self, max: Self) -> Self { + assert!(min.le(&max)); let mut x = self; - if x < min { + if x.lt(&min) { x = min; } - if x > max { + if x.gt(&max) { x = max; } x } - //} - + + #[doc = doc::cmp::total_cmp!(F)] + #[must_use] #[inline] pub const fn total_cmp(&self, other: &Self) -> Ordering { - let left = self.to_int(); - let right = other.to_int(); + let left = self.to_signed_bits(); + let right = other.to_signed_bits(); if left.is_negative() && right.is_negative() { BIntD8::cmp(&left, &right).reverse() } else { @@ -80,8 +90,7 @@ crate::nightly::impl_const! { impl const PartialEq for Float { #[inline] fn eq(&self, other: &Self) -> bool { - handle_nan!(false; self, other); - (self.is_zero() && other.is_zero()) || BUintD8::eq(&self.to_bits(), &other.to_bits()) + Self::eq(&self, other) } } } @@ -90,11 +99,7 @@ crate::nightly::impl_const! { impl const PartialOrd for Float { #[inline] fn partial_cmp(&self, other: &Self) -> Option { - handle_nan!(None; self, other); - if self.is_zero() && other.is_zero() { - return Some(Ordering::Equal); - } - Some(self.total_cmp(other)) + Self::partial_cmp(&self, other) } } } @@ -124,7 +129,6 @@ mod tests { function: ::clamp(a: ftest, b: ftest, c: ftest), skip: !(b <= c) } - test_bignum! { function: ::total_cmp(a: ref &ftest, b: ref &ftest) } diff --git a/src/float/const_trait_fillers.rs b/src/float/const_trait_fillers.rs new file mode 100644 index 0000000..e6a1ae4 --- /dev/null +++ b/src/float/const_trait_fillers.rs @@ -0,0 +1,55 @@ +use crate::doc; +use crate::BUintD8; +use super::Float; +use core::cmp::Ordering; + +#[doc = doc::const_trait_fillers::impl_desc!()] +impl Float { + #[inline] + pub const fn eq(&self, other: &Self) -> bool { + handle_nan!(false; self, other); + (self.is_zero() && other.is_zero()) || BUintD8::eq(&self.to_bits(), &other.to_bits()) + } + + #[inline] + pub const fn ne(&self, other: &Self) -> bool { + !Self::eq(&self, other) + } + + #[inline] + pub const fn partial_cmp(&self, other: &Self) -> Option { + handle_nan!(None; self, other); + if self.is_zero() && other.is_zero() { + return Some(Ordering::Equal); + } + Some(self.total_cmp(other)) + } + + #[inline] + pub const fn lt(&self, other: &Self) -> bool { + matches!(self.partial_cmp(&other), Some(Ordering::Less)) + } + + #[inline] + pub const fn le(&self, other: &Self) -> bool { + matches!(self.partial_cmp(&other), Some(Ordering::Less | Ordering::Equal)) + } + + #[inline] + pub const fn gt(&self, other: &Self) -> bool { + matches!(self.partial_cmp(&other), Some(Ordering::Greater)) + } + + #[inline] + pub const fn ge(&self, other: &Self) -> bool { + matches!(self.partial_cmp(&other), Some(Ordering::Greater | Ordering::Equal)) + } + + #[inline] + pub(crate) const fn neg(mut self) -> Self { + type Digit = u8; + + self.bits.digits[W - 1] ^= 1 << (Digit::BITS - 1); + self + } +} \ No newline at end of file diff --git a/src/float/consts.rs b/src/float/consts.rs index 2a56db5..bfc7bb6 100644 --- a/src/float/consts.rs +++ b/src/float/consts.rs @@ -1,6 +1,6 @@ -use super::Float; -use crate::bint::BIntD8; +use super::{Float, FloatExponent, UnsignedFloatExponent}; use crate::buint::BUintD8; +use crate::doc; const fn buint_from_usize(u: usize) -> BUintD8 { const UINT_BITS: usize = ::BITS as usize; @@ -16,20 +16,27 @@ const fn buint_from_usize(u: usize) -> BUintD8 { out } +#[doc = doc::consts::impl_desc!()] impl 
Float { + #[doc = doc::consts::RADIX!(F)] pub const RADIX: u32 = 2; + #[doc = doc::consts::MANTISSA_DIGITS!(F)] pub const MANTISSA_DIGITS: u32 = MB as u32 + 1; + #[doc = doc::consts::DIGITS!(F)] pub const DIGITS: u32 = BUintD8::::ONE.wrapping_shl(Self::MB).ilog10() as u32; - pub const EPSILON: Self = { - let u = Self::EXP_BIAS.to_bits().sub(buint_from_usize::(MB)); - Self::from_bits(u.shl(Self::MB)) - }; + pub(crate) const MB_AS_FLOAT_EXP: FloatExponent = Self::MB as FloatExponent; + + #[doc = doc::consts::EPSILON!(F)] + pub const EPSILON: Self = Self::normal_power_of_two(-Self::MB_AS_FLOAT_EXP); + + pub(crate) const HALF_EPSILON: Self = Self::normal_power_of_two(-(Self::MB as FloatExponent + 1)); - pub const EXP_BIAS: BIntD8 = BIntD8::MAX.wrapping_shr(Self::MB + 1); + pub const EXP_BIAS: FloatExponent = (1 << (Self::EXPONENT_BITS - 1)) - 1;// UnsignedFloatExponent::MAX.wrapping_shr(Self::MB + 1) as _; + #[doc = doc::consts::MIN!(F)] pub const MIN: Self = { let mut e = BUintD8::MAX; e = e.wrapping_shr(Self::MB + 1); @@ -39,22 +46,38 @@ impl Float { Self::from_bits(e.bitor(m)) }; - pub const MIN_POSITIVE: Self = { Self::from_bits(BUintD8::ONE.wrapping_shl(Self::MB)) }; + #[doc = doc::consts::MIN_POSITIVE!(F)] + pub const MIN_POSITIVE: Self = Self::from_bits(BUintD8::ONE.wrapping_shl(Self::MB)); + pub const MAX_NEGATIVE: Self = Self::MIN_POSITIVE.neg(); + + #[doc = doc::consts::MAX!(F)] pub const MAX: Self = Self::MIN.abs(); - pub const MIN_EXP: BIntD8 = (Self::EXP_BIAS.neg()).wrapping_add(BIntD8::ONE.wrapping_shl(1)); - pub const MAX_EXP: BIntD8 = Self::EXP_BIAS.wrapping_add(BIntD8::ONE); - pub const MAX_UNBIASED_EXP: BUintD8 = Self::EXP_BIAS.to_bits().shl(1); // mul by 2 + + #[doc = doc::consts::MIN_EXP!(F)] + pub const MIN_EXP: FloatExponent = -Self::EXP_BIAS + 2; + + pub(crate) const MIN_SUBNORMAL_EXP: FloatExponent = -Self::EXP_BIAS + 1 - Self::MB as FloatExponent; // TODO: need to check that this fits into FloatExponent + + #[doc = doc::consts::MAX_EXP!(F)] + pub const MAX_EXP: FloatExponent = Self::EXP_BIAS + 1; + + pub const MAX_UNBIASED_EXP: UnsignedFloatExponent = (Self::EXP_BIAS as UnsignedFloatExponent) * 2; + pub const MIN_10_EXP: Self = todo!(); + pub const MAX_10_EXP: Self = todo!(); - pub const MAX_SUBNORMAL: Self = - Self::from_bits(BUintD8::MAX.wrapping_shr(Self::EXPONENT_BITS + 1)); + pub const MAX_SUBNORMAL: Self = Self::from_bits(BUintD8::MAX.wrapping_shr(Self::EXPONENT_BITS + 1)); + pub const MIN_SUBNORMAL: Self = Self::MAX_SUBNORMAL.neg(); + pub const MIN_POSITIVE_SUBNORMAL: Self = Self::from_bits(BUintD8::ONE); + pub const MAX_NEGATIVE_SUBNORMAL: Self = Self::MIN_POSITIVE_SUBNORMAL.neg(); + #[doc = doc::consts::NAN!(F)] pub const NAN: Self = { let mut u = BUintD8::MAX; u = u.wrapping_shl(1); @@ -63,15 +86,16 @@ impl Float { Self::from_bits(u) }; - pub const QNAN: Self = { - let bits = Self::NAN.to_bits(); - Self::from_bits(bits.bitor(BUintD8::ONE.shl(Self::MB - 1))) - }; + // pub const QNAN: Self = { + // let bits = Self::NAN.to_bits(); + // Self::from_bits(bits.bitor(BUintD8::ONE.shl(Self::MB - 1))) + // }; pub const NEG_NAN: Self = Self::NAN.neg(); - pub const NEG_QNAN: Self = Self::QNAN.neg(); + // pub const NEG_QNAN: Self = Self::QNAN.neg(); + #[doc = doc::consts::INFINITY!(F)] pub const INFINITY: Self = { let mut u = BUintD8::MAX; u = u.wrapping_shl(1); @@ -80,6 +104,7 @@ impl Float { Self::from_bits(u) }; + #[doc = doc::consts::NEG_INFINITY!(F)] pub const NEG_INFINITY: Self = { let mut u = BUintD8::MAX; u = u.wrapping_shr(Self::MB); @@ -89,32 +114,17 @@ impl 
Float { pub const ZERO: Self = Self::from_bits(BUintD8::ZERO); - pub const NEG_ZERO: Self = Self::from_words(BIntD8::::MIN.bits.digits); + pub const NEG_ZERO: Self = Self::ZERO.neg(); - pub const ONE: Self = { - let mut u = BUintD8::MAX; - u = u.wrapping_shl(2); - u = u.wrapping_shr(2 + Self::MB); - u = u.wrapping_shl(Self::MB); - Self::from_bits(u) - }; + pub const ONE: Self = Self::normal_power_of_two(0); - pub const TWO: Self = { - let (_, exp, _) = Self::ONE.to_raw_parts(); - Self::from_exp_mant(false, exp.add(BUintD8::ONE), BUintD8::ZERO) - }; + pub const TWO: Self = Self::normal_power_of_two(1); - pub const HALF: Self = { - let (_, exp, _) = Self::ONE.to_raw_parts(); - Self::from_exp_mant(false, exp.sub(BUintD8::ONE), BUintD8::ZERO) - }; + pub const HALF: Self = Self::normal_power_of_two(-1); - pub const QUARTER: Self = { - let (_, exp, _) = Self::ONE.to_raw_parts(); - Self::from_exp_mant(false, exp.sub(BUintD8::TWO), BUintD8::ZERO) - }; + pub const QUARTER: Self = Self::normal_power_of_two(-2); - pub const NEG_ONE: Self = Self::from_bits(Self::ONE.bits.bitor(Self::NEG_ZERO.bits)); + pub const NEG_ONE: Self = Self::ONE.neg(); } #[cfg(test)] @@ -158,11 +168,11 @@ mod tests { } // don't test NAN as Rust f64/f32 NAN bit pattern not guaranteed to be stable across version - #[test] - fn nan_consts_is_nan() { - assert!(FTEST::NAN.is_nan()); - assert!(FTEST::QNAN.is_nan()); - } + // #[test] + // fn nan_consts_is_nan() { + // assert!(FTEST::NAN.is_nan()); + // assert!(FTEST::QNAN.is_nan()); + // } test_numeric_constants![ (ZERO, 0.0), @@ -170,15 +180,17 @@ mod tests { (ONE, 1.0), (QUARTER, 0.25), (HALF, 0.5), - (NEG_ONE, -1) + (NEG_ONE, -1), + (TWO, 2) ]; + test_constant!(F64::BITS == 64 as ExpType); test_constant!(F32::BITS == 32 as ExpType); test_constant!(F64::EXPONENT_BITS == 11 as ExpType); test_constant!(F32::EXPONENT_BITS == 8 as ExpType); - test_constant!(F64::EXP_BIAS == 1023i64); - test_constant!(F32::EXP_BIAS == 127i32); - test_constant!(F64::MAX_UNBIASED_EXP == 2046u64); - test_constant!(F32::MAX_UNBIASED_EXP == 254u32); + test_constant!(F64::EXP_BIAS == 1023i128); + test_constant!(F32::EXP_BIAS == 127i128); + test_constant!(F64::MAX_UNBIASED_EXP == 2046u128); + test_constant!(F32::MAX_UNBIASED_EXP == 254u128); } diff --git a/src/float/convert.rs b/src/float/convert.rs index a350ed9..dd485e8 100644 --- a/src/float/convert.rs +++ b/src/float/convert.rs @@ -1,5 +1,20 @@ -use super::Float; -use crate::BUintD8; +use super::{Float, FloatExponent, UnsignedFloatExponent}; +use crate::{BIntD8, BUintD8, ExpType}; +use crate::cast::float::ConvertFloatParts; + +type Digit = u8; + +impl BUintD8 { + #[inline] + pub(crate) const fn is_even(&self) -> bool { + self.digits[0] & 1 == 0 + } + + #[inline] + pub(crate) const fn is_odd(&self) -> bool { + !self.is_even() + } +} impl Float { #[inline(always)] @@ -11,6 +26,236 @@ impl Float { pub const fn from_bits(v: BUintD8) -> Self { Self { bits: v } } + + #[inline(always)] + pub(crate) const fn from_words(words: [Digit; W]) -> Self { + Self::from_bits(BUintD8::from_digits(words)) + } + + #[inline(always)] + pub(crate) const fn words(&self) -> &[Digit; W] { + &self.bits.digits + } + + #[inline(always)] + pub(crate) const fn to_signed_bits(self) -> BIntD8 { + BIntD8::from_bits(self.to_bits()) + } +} + +impl ConvertFloatParts for Float { + type Mantissa = BUintD8; + type SignedExp = FloatExponent; + type UnsignedExp = UnsignedFloatExponent; + + #[inline] + fn into_raw_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa) { + 
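        // The trait methods in this impl delegate to the inherent `const fn`s of the same
        // name defined on `Float<W, MB>` later in this file.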
Self::into_raw_parts(self) + } + + #[inline] + fn into_biased_parts(self) -> (bool, Self::UnsignedExp, Self::Mantissa) { + Self::into_biased_parts(self) + } + + #[inline] + fn into_signed_biased_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + Self::into_signed_biased_parts(self) + } + + #[inline] + fn into_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + Self::into_signed_parts(self) + } + + #[inline] + fn into_normalised_signed_parts(self) -> (bool, Self::SignedExp, Self::Mantissa) { + Self::into_normalised_signed_parts(self) + } + + #[inline] + fn from_raw_parts(sign: bool, exponent: Self::UnsignedExp, mantissa: Self::Mantissa) -> Self { + Self::from_raw_parts(sign, exponent, mantissa) + } + + #[inline] + fn from_biased_parts(sign: bool, exponent: Self::UnsignedExp, mantissa: Self::Mantissa) -> Self { + Self::from_biased_parts(sign, exponent, mantissa) + } + + #[inline] + fn from_signed_biased_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + Self::from_signed_biased_parts(sign, exponent, mantissa) + } + + #[inline] + fn from_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + Self::from_signed_parts(sign, exponent, mantissa) + } + + #[inline] + fn round_exponent_mantissa(exponent: Self::SignedExp, mantissa: Self::Mantissa, shift: ExpType) -> (Self::SignedExp, Self::Mantissa) { + Self::round_exponent_mantissa::(exponent, mantissa, shift) + } + + #[inline] + fn from_normalised_signed_parts(sign: bool, exponent: Self::SignedExp, mantissa: Self::Mantissa) -> Self { + Self::from_normalised_signed_parts(sign, exponent, mantissa) + } +} + +impl Float { + const _ASSERT_1: () = assert!(Self::EXPONENT_BITS <= 127); + const _ASSERT_2: () = assert!(Self::MIN_EXP.checked_sub(MB as FloatExponent + 1).is_some()); // ensures into_normalised_signed_parts won't panic + + // split into sign, exponent and mantissa + #[inline] + pub(crate) const fn into_raw_parts(self) -> (bool, UnsignedFloatExponent, BUintD8) { + let sign = self.is_sign_negative(); + let exp = self.bits.bitand(Self::SIGN_MASK).shr(Self::MB); + let mant = self.bits.bitand(Self::MANTISSA_MASK); + + (sign, exp.cast_to_unsigned_float_exponent(), mant) + } + + /// construct float from sign, exponent and mantissa + #[inline] + pub(crate) const fn from_raw_parts(sign: bool, exponent: UnsignedFloatExponent, mantissa: BUintD8) -> Self { + debug_assert!(mantissa.bits() <= Self::MB); + let mut bits = BUintD8::cast_from_unsigned_float_exponent(exponent).shl(Self::MB).bitor(mantissa); + if sign { + bits.digits[W - 1] |= 1 << (Digit::BITS - 1); + } + Self::from_bits(bits) + } + + /// split into sign, exponent and mantissa and adjust to reflect actual numerical represenation, but without taking exponent bias into account + #[inline] + pub(crate) const fn into_biased_parts(self) -> (bool, UnsignedFloatExponent, BUintD8) { + let (sign, exp, mant) = self.into_raw_parts(); + if exp == 0 { + (sign, 1, mant) + } else { + (sign, exp, mant.bitor(Self::MANTISSA_IMPLICIT_LEADING_ONE_MASK)) + } + } + + #[inline] + pub(crate) const fn from_biased_parts(sign: bool, mut exponent: UnsignedFloatExponent, mut mantissa: BUintD8) -> Self { + debug_assert!(exponent != 0); // exponent should not be zero as should be 1 for subnormal numbers + if mantissa.bit(Self::MB) { + mantissa = mantissa.bitxor(Self::MANTISSA_IMPLICIT_LEADING_ONE_MASK); // remove the implicit bit from the mantissa + } else { + debug_assert!(exponent == 1); // number is subnormal so exponent should be 1 + 
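            // subnormal values are stored with a raw exponent field of zero, so the biased
            // exponent of 1 introduced by `into_biased_parts` is mapped back to 0 here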
exponent = 0; + } + Self::from_raw_parts(sign, exponent, mantissa) + } + + #[inline] + pub(crate) const fn into_signed_biased_parts(self) -> (bool, i128, BUintD8) { + let (sign, exp, mant) = self.into_biased_parts(); + (sign, exp as i128, mant) + } + + #[inline] + pub(crate) const fn from_signed_biased_parts(sign: bool, exponent: FloatExponent, mantissa: BUintD8) -> Self { + debug_assert!(!exponent.is_negative()); + let exponent = exponent as UnsignedFloatExponent; + Self::from_biased_parts(sign, exponent, mantissa) + } + + #[inline] + pub(crate) const fn into_signed_parts(self) -> (bool, FloatExponent, BUintD8) { + let (sign, exp, mant) = self.into_signed_biased_parts(); + (sign, exp - Self::EXP_BIAS, mant) + } + + #[inline] + pub(crate) const fn from_signed_parts(sign: bool, exponent: FloatExponent, mantissa: BUintD8) -> Self { + let exponent = exponent + Self::EXP_BIAS; + Self::from_signed_biased_parts(sign, exponent, mantissa) + } + + /// mantissa is normalised so that it is always of the form 1.*...* + #[inline] + pub(crate) const fn into_normalised_signed_parts(self) -> (bool, FloatExponent, BUintD8) { + let (sign, exp, mant) = self.into_signed_parts(); + let shift = Self::MB + 1 - mant.bits(); + if shift == 0 { + (sign, exp, mant) + } else { + let normalised_mant = unsafe { + mant.unchecked_shl_internal(shift) + }; // SAFETY: we can use unchecked variant since shift is <= Self::MB + 1 < number of bits of float + debug_assert!(normalised_mant.is_zero() || normalised_mant.bits() == Self::MB + 1); + let normalised_exp = exp - (shift as FloatExponent); + + (sign, normalised_exp, normalised_mant) + } + } + + #[inline] + pub(crate) const fn round_exponent_mantissa(mut exponent: FloatExponent, mantissa: BUintD8, shift: ExpType) -> (FloatExponent, BUintD8) { + // we allow current_width to be specified so that we don't have to recompute mantissa.bits() if already known + let mut shifted_mantissa = unsafe { + mantissa.unchecked_shr_pad_internal::(shift) + }; + if !TIES_EVEN { + return (exponent, shifted_mantissa); // if not TIES_EVEN, then we truncate + } + let discarded_shifted_bits = mantissa.bitand(BUintD8::MAX.shr(Self::BITS - shift)); + if discarded_shifted_bits.bit(shift - 1) { // in this case, the discarded portion is at least a half + if shifted_mantissa.is_odd() || !discarded_shifted_bits.is_power_of_two() { // in this case, ties to even says we round up. checking if not a power of two tells us that there is at least one bit set to 1 (after the most significant bit set to 1). 
we check in this order as is_odd is O(1) whereas is_power_of_two is O(N) + shifted_mantissa = shifted_mantissa.add(BUintD8::ONE); + if shifted_mantissa.bit(shift) { // check for overflow (with respect to the mantissa bit width) + exponent += 1; + shifted_mantissa = unsafe { + shifted_mantissa.unchecked_shr_pad_internal::(1) + }; + } + } + } + (exponent, shifted_mantissa) + } + + #[inline] + pub(crate) const fn from_normalised_signed_parts(sign: bool, exponent: FloatExponent, mantissa: BUintD8) -> Self { + debug_assert!(mantissa.is_zero() || mantissa.bits() == Self::MB + 1); + + if exponent < Self::MIN_EXP - 1 { + let shift = (Self::MIN_EXP - 1 - exponent) as ExpType; + let (out_exponent, out_mantissa) = Self::round_exponent_mantissa::(Self::MIN_EXP - 1, mantissa, shift); + + Self::from_signed_parts(sign, out_exponent, out_mantissa) + } else { + Self::from_signed_parts(sign, exponent, mantissa) + } + } + + #[inline] + pub(crate) const fn signed_biased_exponent(self) -> FloatExponent { + self.into_signed_biased_parts().1 + } +} + +#[cfg(test)] +macro_rules! test_reversible_conversion { + ($to: ident, $from: ident ($($param: ident), *) -> $dest_type: ident $(, $prop: path)?) => { + paste::paste! { + quickcheck::quickcheck! { + fn [](v: $dest_type) -> quickcheck::TestResult { + if !v.is_finite() { + return quickcheck::TestResult::discard(); + } + let ($($param), *) = <$dest_type>::$to(v); + let c_from = <$dest_type>::$from($($param), *); + // assert_eq!($($prop)?(c_from), $($prop)?(v)); + quickcheck::TestResult::from_bool($($prop)?(c_from) == $($prop)?(v)) + } + } + } + }; } #[cfg(test)] @@ -28,4 +273,10 @@ mod tests { test_bignum! { function: ::from_bits(a: u32) } -} + + test_reversible_conversion!(into_raw_parts, from_raw_parts(a, b, c) -> FTEST, FTEST::to_bits); + test_reversible_conversion!(into_biased_parts, from_biased_parts(a, b, c) -> FTEST, FTEST::to_bits); + test_reversible_conversion!(into_signed_biased_parts, from_signed_biased_parts(a, b, c) -> FTEST, FTEST::to_bits); + test_reversible_conversion!(into_signed_parts, from_signed_parts(a, b, c) -> FTEST, FTEST::to_bits); + test_reversible_conversion!(into_normalised_signed_parts, from_normalised_signed_parts(a, b, c) -> FTEST, FTEST::to_bits); +} \ No newline at end of file diff --git a/src/float/endian.rs b/src/float/endian.rs index 6de84fe..7b196d5 100644 --- a/src/float/endian.rs +++ b/src/float/endian.rs @@ -1,36 +1,60 @@ use super::Float; -use crate::digit::u8 as digit; use crate::BUintD8; +use crate::doc; #[cfg(feature = "nightly")] impl Float { + #[cfg(feature = "nightly")] + #[doc = doc::endian::to_be_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_be_bytes(self) -> [u8; W * digit::BYTES as usize] { + pub const fn to_be_bytes(self) -> [u8; BUintD8::::BYTES_USIZE] { self.to_bits().to_be_bytes() } + #[cfg(feature = "nightly")] + #[doc = doc::endian::to_le_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_le_bytes(self) -> [u8; W * digit::BYTES as usize] { + pub const fn to_le_bytes(self) -> [u8; BUintD8::::BYTES_USIZE] { self.to_bits().to_le_bytes() } + #[cfg(feature = "nightly")] + #[doc = doc::endian::to_ne_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use = doc::must_use_op!()] #[inline] - pub const fn to_ne_bytes(self) -> [u8; W * digit::BYTES as usize] { + pub const fn to_ne_bytes(self) -> [u8; BUintD8::::BYTES_USIZE] { self.to_bits().to_ne_bytes() } + 
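    // Hedged usage sketch (assumes the `nightly` feature and a `Float<8, 52>` alias such as the
    // `F64` used in this crate's tests); the byte conversions round-trip exactly because they
    // delegate to the underlying `BUintD8` bit pattern:
    //
    //     let x = F64::HALF;
    //     assert_eq!(F64::from_be_bytes(x.to_be_bytes()).to_bits(), x.to_bits());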
#[cfg(feature = "nightly")] + #[doc = doc::endian::from_be_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use] #[inline] - pub const fn from_be_bytes(bytes: [u8; W * digit::BYTES as usize]) -> Self { + pub const fn from_be_bytes(bytes: [u8; BUintD8::::BYTES_USIZE]) -> Self { Self::from_bits(BUintD8::from_be_bytes(bytes)) } + #[cfg(feature = "nightly")] + #[doc = doc::endian::from_le_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use] #[inline] - pub const fn from_le_bytes(bytes: [u8; W * digit::BYTES as usize]) -> Self { + pub const fn from_le_bytes(bytes: [u8; BUintD8::::BYTES_USIZE]) -> Self { Self::from_bits(BUintD8::from_le_bytes(bytes)) } + #[cfg(feature = "nightly")] + #[doc = doc::endian::from_ne_bytes!(F)] + #[doc = doc::requires_feature!("nightly")] + #[must_use] #[inline] - pub const fn from_ne_bytes(bytes: [u8; W * digit::BYTES as usize]) -> Self { + pub const fn from_ne_bytes(bytes: [u8; BUintD8::::BYTES_USIZE]) -> Self { Self::from_bits(BUintD8::from_ne_bytes(bytes)) } } diff --git a/src/float/math.rs b/src/float/math.rs deleted file mode 100644 index 921f87b..0000000 --- a/src/float/math.rs +++ /dev/null @@ -1,783 +0,0 @@ -use super::Float; -use crate::cast::As; -use crate::{BIntD8, BUintD8}; - -/*/// Returns tuple of division and whether u is less than v -pub const fn div_float(u: BUintD8, v: BUintD8) -> (BUintD8, bool) { - let gt = if let core::cmp::Ordering::Less = u.cmp(&v) { - 0 - } else { - 1 - }; - // `self` is padded with N trailing zeros (less significant digits). - // `v` is padded with N leading zeros (more significant digits). - let shift = v.digits[N - 1].leading_zeros(); - // `shift` is between 0 and 64 inclusive. - let v = super::unchecked_shl(v, shift); - // `v` is still padded with N leading zeros. - - struct Remainder { - first: Digit, - second: Digit, - rest: [Digit; M], - } - impl Remainder { - const fn new(uint: BUintD8, shift: ExpType) -> Self { - // This shift can be anything from 0 to 64 inclusive. - // Scenarios: - // * shift by 0 -> nothing happens, still N trailing zeros. - // * shift by 64 -> all digits shift by one to the right, there are now (N - 1) trailing zeros and 1 leading zero. - // * shift by amount between 0 and 64 -> there may be 0 or 1 leading zeros and (N - 1) or N trailing zeros. - // So indexing between 2N - 1 and N - 1 will get any non-zero digits. - // Instead of a logical right shift, we will perform a rotate right on the uint - this is the same except the part of the number which may have been removed from the right shift is instead brought to the most significant digit of the number. - // Then do fancy bit shifts and logic to separate the first and last non zero digits. - let shift = Digit::BITS - shift; - let mut rest = uint.rotate_right(shift); - let last_digit = rest.digits[M - 1]; - let last = (last_digit << shift) >> shift; - let second = last_digit ^ last; - rest.digits[M - 1] = last; - Self { - first: 0, - second, - rest: rest.digits, - } - } - const fn index(&self, index: usize) -> Digit { - if index == M - 1 { - self.first - } else if index == M { - self.second - } else if index > M { - self.rest[index - M - 1] - } else { - // There are M - 1 trailing zeros so we can return zero here. 
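// A minimal sketch of the round-trip contract these endian conversions provide, using
// the standard library's f64 equivalents (the helper name here is only illustrative);
// the Float methods above delegate to to_bits()/from_bits() in the same way.
fn be_round_trip(x: f64) -> f64 {
    let bytes: [u8; 8] = x.to_be_bytes(); // most significant byte first
    f64::from_be_bytes(bytes)
}
// be_round_trip(1.5) == 1.5; the same identity holds for the le and ne variants with
// their respective byte orders.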
- 0 - } - } - const fn set_digit(&mut self, index: usize, digit: Digit) { - if index == M - 1 { - self.first = digit; - } else if index == M { - self.second = digit; - } else if index > M { - self.rest[index - M - 1] = digit; - } - } - /*const fn to_uint(self, shift: ExpType) -> BUintD8 { - let mut out = BUintD8::ZERO; - let mut i = 0; - while i < M { - out.digits[i] = self.index(i) >> shift; - i += 1; - } - if shift > 0 { - let mut i = 0; - while i < M { - out.digits[i] |= self.rest[i] << (Digit::BITS - shift); - i += 1; - } - } - out - }*/ - const fn sub(&mut self, start: usize, rhs: Mul, end: usize) -> bool { - let mut carry = false; - let mut i = 0; - while i < end { - let (sum, overflow1) = rhs.index(i).overflowing_add(carry as Digit); - let (sub, overflow2) = self.index(i + start).overflowing_sub(sum); - self.set_digit(i + start, sub); - carry = overflow1 || overflow2; - i += 1; - } - carry - } - const fn add(&mut self, start: usize, rhs: [Digit; M], end: usize) -> bool { - let mut carry = false; - let mut i = 0; - while i < end { - let (sum, overflow1) = rhs[i].overflowing_add(carry as Digit); - let (sum, overflow2) = self.index(i + start).overflowing_sub(sum); - self.set_digit(i + start, sum); - carry = overflow1 || overflow2; - i += 1; - } - carry - } - } - - // The whole implementation of `Mul` doesn't need to change as it is already padded with N leading zeros. - struct Mul { - last: Digit, - rest: [Digit; M], - } - impl Mul { - const fn new(uint: BUintD8, rhs: Digit) -> Self { - let mut rest = [0; M]; - let mut carry: Digit = 0; - let mut i = 0; - while i < M { - let (prod, c) = crate::arithmetic::mul_carry_unsigned(carry, 0, uint.digits[i], rhs); - carry = c; - rest[i] = prod; - i += 1; - } - Self { - last: carry, - rest, - } - } - const fn index(&self, index: usize) -> Digit { - if index == M { - self.last - } else { - self.rest[index] - } - } - } - - let mut u = Remainder::new(u, shift); - let mut q = BUintD8::ZERO; - let v_n_1 = v.digits[N - 1]; - let v_n_2 = v.digits[N - 2]; - let gt_half = v_n_1 > digit::HALF; - - let mut j = N + gt; - while j > gt { - j -= 1; - let u_jn = u.index(j + N); - let mut q_hat = if u_jn < v_n_1 { - let (mut q_hat, mut r_hat) = if gt_half { - BUintD8::::div_wide(u_jn, u.index(j + N - 1), v_n_1) - } else { - BUintD8::::div_half(u_jn, u.index(j + N - 1), v_n_1) - }; - loop { - let a = ((r_hat as DoubleDigit) << digit::BITS) | u.index(j + N - 2) as DoubleDigit; - let b = q_hat as DoubleDigit * v_n_2 as DoubleDigit; - if b <= a { - break; - } - /*let (hi, lo) = digit::from_double_digit(q_hat as DoubleDigit * v_n_2 as DoubleDigit); - if hi < r_hat { - break; - } else if hi == r_hat && lo <= u.index(j + n - 2) { - break; - }*/ - q_hat -= 1; - let (new_r_hat, overflow) = r_hat.overflowing_add(v_n_1); - r_hat = new_r_hat; - if overflow { - break; - } - } - q_hat - } else { - Digit::MAX - }; - - let q_hat_v = Mul::new(v, q_hat); - let carry = u.sub(j, q_hat_v, N + 1); - if carry { - q_hat -= 1; - let carry = u.add(j, v.digits, N); - u.set_digit(j + N, u.index(j + N).wrapping_add(carry as Digit)); - } - // if self is less than other, q_hat is 0 - q.digits[j - gt] = q_hat; - } - - (q, gt == 0) - //super::unchecked_shl(self.as_buint::<{N * 2}>(), N as u16 * 64).div_rem(v.as_buint::<{N * 2}>()).0 -}*/ - -/* -All functions: -mul_add, div_euclid, rem_euclid, powi, powf, exp, exp2, ln, log, log2, log10, cbrt, hypot, sin, cos, tan, asin, acos, atan, atan2, sin_cos, exp_m1, ln_1p, sinh, cosh, tanh, asinh, acosh, atanh, to_degrees, to_radians -*/ - -impl Float { - 
#[inline] - pub const fn abs(self) -> Self { - if self.is_sign_negative() { - self.neg() - } else { - self - } - } - - pub fn sqrt(self) -> Self { - handle_nan!(self; self); - if self.is_zero() { - return self; - } - let bits = self.to_bits(); - if bits == Self::INFINITY.to_bits() { - return Self::INFINITY; - } - if self.is_sign_negative() { - return Self::NAN; - /*let u = BUintD8::MAX << (Self::MB - 1); - return Self::from_bits(u);*/ - } - - let tiny = Self::from_bits(BUintD8::from(0b11011u8) << Self::MB); // TODO: may not work for exponents stored with very few bits - - let mut ix = BIntD8::from_bits(bits); - let mut i: BIntD8; - let mut m = ix >> Self::MB; - if m.is_zero() { - /* subnormal x */ - i = BIntD8::ZERO; - while (ix & (BIntD8::ONE << Self::MB)).is_zero() { - ix <<= 1; - i = i + BIntD8::ONE; - } - m -= i - BIntD8::ONE; - } - m -= Self::EXP_BIAS; /* unbias exponent */ - ix = (ix & BIntD8::from_bits(BUintD8::MAX >> (Self::BITS - Self::MB))) - | (BIntD8::ONE << Self::MB); - if m & BIntD8::ONE == BIntD8::ONE { - /* odd m, double x to make it even */ - ix += ix; - } - m >>= 1; /* m = [m/2] */ - - /* generate sqrt(x) bit by bit */ - ix += ix; - let mut q = BIntD8::ZERO; - let mut s = BIntD8::ZERO; - let mut r = BUintD8::ONE << (Self::MB + 1); /* r = moving bit from right to left */ - - let mut t: BIntD8; - while !r.is_zero() { - t = s + BIntD8::from_bits(r); - if t <= ix { - s = t + BIntD8::from_bits(r); - ix -= t; - q += BIntD8::from_bits(r); - } - ix += ix; - r = r >> 1u8; - } - - /* use floating add to find out rounding direction */ - let mut z: Self; - if !ix.is_zero() { - z = Self::ONE - tiny; /* raise inexact flag */ - if z >= Self::ONE { - z = Self::ONE + tiny; - if z > Self::ONE { - q += BIntD8::TWO; - } else { - q += q & BIntD8::ONE; - } - } - } - - ix = (q >> 1u8) + BIntD8::from_bits((BUintD8::MAX << (Self::MB + 1 + 2)) >> 2u8); - ix += m << Self::MB; - Self::from_bits(ix.to_bits()) - } - - #[inline] - pub fn round(self) -> Self { - let a = Self::HALF - Self::QUARTER * Self::EPSILON; - (self + a.copysign(self)).trunc() - } - - #[inline] - pub fn ceil(self) -> Self { - let mut u = self.to_bits(); - let e = self.exponent() - Self::EXP_BIAS; - - if e >= BIntD8::from(MB) { - return self; - } - if !e.is_negative() { - let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; - if (u & m).is_zero() { - return self; - } - if self.is_sign_positive() { - u += m; - } - u &= !m; - } else if self.is_sign_negative() { - return Self::NEG_ZERO; - } else if !(u << 1u8).is_zero() { - return Self::ONE; - } - Self::from_bits(u) - } - - #[inline] - pub fn floor(self) -> Self { - let mut bits = self.to_bits(); - let e = self.exponent() - Self::EXP_BIAS; - - if e >= BIntD8::from(MB) { - return self; - } - if !e.is_negative() { - let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; - if (bits & m).is_zero() { - return self; - } - if self.is_sign_negative() { - bits += m; - } - bits &= !m; - } else if self.is_sign_positive() { - return Self::ZERO; - } else if !(bits << 1u8).is_zero() { - return Self::NEG_ONE; - } - Self::from_bits(bits) - } - - #[inline] - pub fn trunc(self) -> Self { - //return self.fract_trunc().1; - let mut i = self.to_bits(); - let exp_bits = BIntD8::from(Self::BITS - Self::MB); - let mut e = self.exponent() - Self::EXP_BIAS + exp_bits; - - if e >= BIntD8::from(Self::BITS) { - return self; - } - if e < exp_bits { - e = BIntD8::ONE; - } - let m = BIntD8::NEG_ONE.to_bits() >> e; - if (i & m).is_zero() { - return self; - } - i &= !m; - Self::from_bits(i) - } - - #[inline] - pub 
fn fract(self) -> Self { - self.fract_trunc().0 - } - - #[inline] - pub fn fract_trunc(self) -> (Self, Self) { - handle_nan!((self, self); self); - if self.is_infinite() { - return (Self::NAN, self); - } - - let mut u = self.to_bits(); - let e = self.exponent() - Self::EXP_BIAS; - - if self.is_infinite() { - return (Self::NAN, self); - } - - if e >= BIntD8::from(MB) { - return (Self::ZERO, self); - } - if e.is_negative() { - let trunc = if self.is_sign_negative() { - Self::NEG_ZERO - } else { - Self::ZERO - }; - if self.is_zero() { - return (Self::ZERO, self); - } - return (self, trunc); - } - - let mask = BUintD8::::MAX >> (e + (Self::BITS - Self::MB).as_::>()); - if (u & mask).is_zero() { - return (Self::ZERO, self); - } - u &= !mask; - let trunc = Self::from_bits(u); - (self - trunc, trunc) - } - - #[inline] - pub fn recip2(self) -> Self - where - [(); W * 2]:, - { - Self::ONE / self - } - - #[inline] - pub fn div_euclid(self, rhs: Self) -> Self - where - [(); W * 2]:, - { - let div = (self / rhs).trunc(); - if self % rhs < Self::ZERO { - return if rhs > Self::ZERO { - div - Self::ONE - } else { - div + Self::ONE - }; - } - div - } - - #[inline] - pub fn rem_euclid(self, rhs: Self) -> Self { - let rem = self % rhs; - if rem < Self::NEG_ZERO { - rem + rhs.abs() - } else { - rem - } - } - - #[inline] - pub fn powi(mut self, n: i32) -> Self - where - [(); W * 2]:, - { - // println!("{:032b}, {}", self.to_bits(), n); - if n == 0 { - return self; - } - let mut n_abs = n.unsigned_abs(); // unsigned abs since otherwise overflow could occur (if n == i32::MIN) - let mut y = Self::ONE; - while n_abs > 1 { - if n_abs & 1 == 1 { - // out = out * self; - y = y * self; - } - self = self * self; - n_abs >>= 1; - } - if n.is_negative() { - Self::ONE / (self * y) - } else { - self * y - } - } - - /*pub fn remquof(mut self, mut y: Self) -> /*(Self, BIntD8)*/(Self, Self) { - handle_nan!(self; self); - handle_nan!(y; y); - if self.is_infinite() || y.is_infinite() { - return (Self::NAN, Self::NAN); - } - - if y.is_zero() { - return (Self::QNAN, Self::QNAN); - } - if self.is_zero() { - return (self, self); - } - let ux = self.to_bits(); - let mut uy = y.to_bits(); - let mut ex = self.exponent(); - let mut ey = y.exponent(); - let sx = self.is_sign_negative(); - let sy = y.is_sign_negative(); - let mut uxi = ux; - - /* normalize x and y */ - let mut i; - if ex.is_zero() { - i = uxi << (Self::BITS - Self::MB); - while !BIntD8::from_bits(i).is_negative() { - ex -= BIntD8::ONE; - i <<= 1u8; - } - uxi <<= -ex + BIntD8::ONE; - } else { - uxi &= BUintD8::MAX >> (Self::BITS - Self::MB); - uxi |= BUintD8::ONE << Self::MB; - } - if ey.is_zero() { - i = uy << (Self::BITS - Self::MB); - while !BIntD8::from_bits(i).is_negative() { - ey -= BIntD8::ONE; - i <<= 1u8; - } - uy <<= -ey + BIntD8::ONE; - } else { - uy &= BUintD8::MAX >> (Self::BITS - Self::MB); - uy |= BUintD8::ONE << Self::MB; - } - - let mut q = BUintD8::::ZERO; - if ex + BIntD8::ONE != ey { - if ex < ey { - return (self, 0); - } - /* x mod y */ - while ex > ey { - i = uxi.wrapping_sub(uy); - if !BIntD8::from_bits(i).is_negative() { - uxi = i; - q += BUintD8::ONE; - } - uxi <<= 1u8; - q <<= 1u8; - ex -= BIntD8::ONE; - } - i = uxi.wrapping_sub(uy); - if !BIntD8::from_bits(i).is_negative() { - uxi = i; - q += BUintD8::ONE; - } - if uxi.is_zero() { - //ex = BIntD8::TWO - BIntD8::from(Self::BITS); - ex = BIntD8::from(-60i8); - } else { - while (uxi >> Self::MB).is_zero() { - uxi <<= 1u8; - ex -= BIntD8::ONE; - } - } - } - - /* scale result and decide between |x| 
and |x|-|y| */ - if ex.is_positive() { - uxi -= BUintD8::ONE << Self::MB; - uxi |= ex.to_bits() << Self::MB; - } else { - uxi >>= -ex + BIntD8::ONE; - } - self = Self::from_bits(uxi); - if sy { - y = -y; - } - if ex == ey || (ex + BIntD8::ONE == ey && (Self::TWO * self > y || (Self::TWO * self == y && !(q % BUintD8::TWO).is_zero()))) { - self = self - y; - q += BUintD8::ONE; - } - q &= BUintD8::MAX >> 1u8; - let quo = if sx ^ sy { -BIntD8::from_bits(q) } else { BIntD8::from_bits(q) }; - if sx { - (-self, quo) - } else { - (self, quo) - } - }*/ - - #[cfg(test)] - pub(crate) fn to_f64(self) -> f64 { - f64::from_bits(self.to_bits().as_()) - } - - #[cfg(test)] - pub(crate) fn to_f32(self) -> f32 { - f32::from_bits(self.to_bits().as_()) - } -} - -#[cfg(test)] -impl super::F32 { - #[inline] - pub fn recip(self) -> Self { - /*let (e, m) = self.exp_mant(); - let normalised = Self::from_exp_mant(self.is_sign_negative(), Self::EXP_BIAS.to_bits() - BUintD8::ONE, m); - println!("norm: {}", normalised.to_f64()); - let r = normalised.recip_internal(); - let e = self.exponent() - Self::EXP_BIAS; - //println!("{}", e); - let e = (-e) + Self::EXP_BIAS; - Self::from_exp_mant(self.is_sign_negative(), e.to_bits() - BUintD8::ONE, r.exp_mant().1)*/ - self.recip_internal() - } - - #[inline] - pub fn recip_internal(self) -> Self { - if self.is_zero() { - return Self::NAN; - } - // solve 1/b - x = 0 so 1/x - b = 0 =: f(x) - // x_{n + 1} = x_n - f(x_n) / f'(x_n) - // = x_n - (1/x_n - b) / (-1/x_n^2) - // = x_n + (x_n - b x_n^2) - // = x_n (2 - b x_n) - let (e, m) = self.exp_mant(); - let e = BIntD8::from_bits(e) - Self::EXP_BIAS; - let inv_e = (-e + Self::EXP_BIAS).to_bits() - BUintD8::ONE; - //println!("{}", e); - let normalised = Self::from_exp_mant(false, Self::EXP_BIAS.to_bits() - BUintD8::ONE, m); - if normalised == Self::ONE { - return Self::from_exp_mant(self.is_sign_negative(), inv_e + 1, BUintD8::ZERO); - } - //println!("norm init: {:064b}", normalised.to_bits()); - let mut x_n = Self::from_bits( - (normalised * Self::HALF).to_bits() ^ (BUintD8::MAX >> (Self::BITS - Self::MB)), - ); - - let mut m_n = x_n.exp_mant().1 << 1; - - /* - 0.5 <= x_n < 1 - 1 <= normalised < 2 - so 0.5 <= x_n * normalised < 2 - 1 <= x_n * 2 < 2 - */ - - //println!("x_n: {}", x_n.to_f32()); - let mut iters = 0; - loop { - let a1 = x_n * Self::TWO; - let a2 = x_n * normalised * x_n; - let x_n_1 = a1 - a2; - assert!(a1.to_f32() >= 1.0 && a1.to_f32() <= 2.0); - assert!(a2.to_f32() >= 0.5 && a2.to_f32() <= 1.0); - - let ma1 = m_n << 1; - let xnf = x_n_1.to_f32(); - assert!( - 0.5 <= xnf && xnf < 1.0, - "{}, norm: {}", - xnf, - normalised.to_f32() - ); - // x_n * (2 - norm * x_n) - if x_n_1 == x_n || iters == 100 { - //println!("done: new: {}, old: {}", x_n_1.to_f64(), x_n.to_f64()); - //println!("norm: {:064b}", x_n_1.to_bits()); - let mut m = x_n_1.exp_mant().1; - if m.bit(Self::MB) { - m ^= BUintD8::ONE << Self::MB; - } - let unnormalised = Self::from_exp_mant(self.is_sign_negative(), inv_e, m); - println!("iters: {}", iters); - return unnormalised; - } - x_n = x_n_1; - iters += 1; - } - } -} - -#[cfg(test)] -mod tests { - use crate::test::test_bignum; - use crate::test::types::{ftest, FTEST}; - - test_bignum! { - function: ::abs(f: ftest) - } - - test_bignum! { - function: ::sqrt(f: ftest) - } - - test_bignum! { - function: ::ceil(f: ftest) - } - - test_bignum! { - function: ::floor(f: ftest) - } - - /*test_bignum! { - function: ::round(f: ftest) - }*/ - - test_bignum! { - function: ::trunc(f: ftest) - } - - test_bignum! 
{ - function: ::fract(f: ftest) - } - - test_bignum! { - function: ::div_euclid(f1: ftest, f2: ftest) - } - - test_bignum! { - function: ::rem_euclid(f1: ftest, f2: ftest) - } - - test_bignum! { - function: ::powi(f: ftest, n: i32) - } - - /*#[test] - fn fmod() { - use super::super::F64; - let f1 = 0.0; - let f2 = f64::INFINITY; - //println!("{:064b}", ((-0.0f64).div_euclid(f2)).to_bits()); - let a = (F64::from(f1) * (F64::from(f2))).to_bits(); - let b = (f1 * (f2)).to_bits(); - /*println!("{:064b}", a); - println!("{:064b}", b);*/ - assert!(a == b.into()); - } - - quickcheck::quickcheck! { - fn qc_recip(u: u32) -> quickcheck::TestResult { - let f = f32::from_bits(u); - if !f.is_finite() || f >= 2.0 || f <= 1.0 { - return quickcheck::TestResult::discard(); - } - - let b1 = f.recip().to_bits(); - let b2 = super::super::F32::from(f).recip().to_f32().to_bits(); - return quickcheck::TestResult::from_bool(b1 == b2 || b1 + 1 == b2); - } - } - - #[test] - fn recip2() { - assert!((0.0 as ftest).to_bits().count_zeros() == 32); - use super::super::F32; - - let f1 = 1.7517333f32; //f32::from_bits(0b0_01111110_01001000000000000000000u32); - println!("{}", f1); - - println!("{:032b}", f1.recip().to_bits()); - println!("{:032b}", F32::from(f1).recip_internal().to_f32().to_bits()); - panic!("") - } - - test_bignum! { - function: ::recip(f: ftest), - skip: !f.is_finite() || f == 0.0 || f >= 2.0 || f <= 1.0 - } - - #[test] - fn recip_u8() { - let mut g = true; - let mut close = true; - for i in 1..=u8::MAX { - let u = 0b0_01111110_00000000000000000000000u32 | ((i as u32) << 15); - let f = f32::from_bits(u); - - let b1 = f.recip().to_bits(); - let b2 = super::super::F32::from(f) - .recip_internal() - .to_f32() - .to_bits(); - let eq = b1 == b2; - if !eq { - println!("{:08b}", i); - if b2 - b1 != 1 { - close = false; - } - } - if b1 > b2 { - if b1 > b2 { - g = false; - } - } - } - println!("all greater: {}", g); - println!("all close: {}", close); - panic!("") - }*/ -} -//0011111111101001100110011001100110011001100111110001100011111001 -//0011111111100000000000000000000000000000000000110110111110011100 - -//0011111111110011111111111111111111111111111110111011010001111101 -// 0011111111111111111111111111111111111111111110010010000011001000 diff --git a/src/float/math/mod.rs b/src/float/math/mod.rs new file mode 100644 index 0000000..9354f20 --- /dev/null +++ b/src/float/math/mod.rs @@ -0,0 +1,112 @@ +use super::Float; +use crate::doc; + +mod sqrt; + +/* +All functions: +mul_add, div_euclid, rem_euclid, powi, powf, exp, exp2, ln, log, log2, log10, cbrt, hypot, sin, cos, tan, asin, acos, atan, atan2, sin_cos, exp_m1, ln_1p, sinh, cosh, tanh, asinh, acosh, atanh, to_degrees, to_radians +*/ + +/* +TODO: acos, acosh, asin, asinh, atan, atan2, atanh, cbrt, cos, cosh, exp, exp2, exp_m1, gamma, hypot, ln, ln_1p, ln_gamma, log, log10, log2, midpoint, mul_add, powf, recip, round_ties_even, tan, tanh, to_degrees, to_radians, +*/ + +#[doc = doc::math::impl_desc!()] +impl Float { + #[doc = doc::math::abs!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub const fn abs(self) -> Self { + if self.is_sign_negative() { + self.neg() + } else { + self + } + } + + #[doc = doc::math::sqrt!(F)] + #[must_use = doc::must_use_op!(float)] + pub fn sqrt(self) -> Self { + self.sqrt_internal() + } + + #[doc = doc::math::div_euclid!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn div_euclid(self, rhs: Self) -> Self + where + [(); W * 2]:, + { + let div = (self / rhs).trunc(); + if self % rhs < Self::ZERO { + 
return if rhs > Self::ZERO { + div - Self::ONE + } else { + div + Self::ONE + }; + } + div + } + + #[doc = doc::math::rem_euclid!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn rem_euclid(self, rhs: Self) -> Self { + let rem = self % rhs; + if rem < Self::NEG_ZERO { + rem + rhs.abs() + } else { + rem + } + } + + #[doc = doc::math::powi!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn powi(mut self, n: i32) -> Self + where + [(); W * 2]:, + { + if n == 0 { + return Self::ONE; + } + let mut n_abs = n.unsigned_abs(); // unsigned abs since otherwise overflow could occur (if n == i32::MIN) + let mut y = Self::ONE; + while n_abs > 1 { + if n_abs & 1 == 1 { + // out = out * self; + y = y * self; + } + self = self * self; + n_abs >>= 1; + } + if n.is_negative() { + Self::ONE / (self * y) + } else { + self * y + } + } +} + +#[cfg(test)] +mod tests { + use crate::test::test_bignum; + use crate::test::types::{ftest, FTEST}; + + test_bignum! { + function: ::abs(f: ftest) + } + test_bignum! { + function: ::sqrt(f: ftest) + } + test_bignum! { + function: ::div_euclid(f1: ftest, f2: ftest) + } + test_bignum! { + function: ::rem_euclid(f1: ftest, f2: ftest) + } + test_bignum! { + function: ::powi(f: ftest, n: i32) + } +} \ No newline at end of file diff --git a/src/float/math/recip.rs b/src/float/math/recip.rs new file mode 100644 index 0000000..e5cc016 --- /dev/null +++ b/src/float/math/recip.rs @@ -0,0 +1,178 @@ +#[cfg(test)] +impl super::F32 { + #[inline] + pub fn recip(self) -> Self { + /*let (_, e, m) = self.into_biased_parts(); + let normalised = Self::from_raw_parts(self.is_sign_negative(), Self::EXP_BIAS.to_bits() - BUintD8::ONE, m); + println!("norm: {}", normalised.to_f64()); + let r = normalised.recip_internal(); + let e = self.exponent() - Self::EXP_BIAS; + //println!("{}", e); + let e = (-e) + Self::EXP_BIAS; + Self::from_raw_parts(self.is_sign_negative(), e.to_bits() - BUintD8::ONE, r.into_biased_parts().2)*/ + self.recip_internal() + } + + #[inline] + pub fn recip_internal(self) -> Self { + if self.is_zero() { + return Self::NAN; + } + // solve 1/b - x = 0 so 1/x - b = 0 =: f(x) + // x_{n + 1} = x_n - f(x_n) / f'(x_n) + // = x_n - (1/x_n - b) / (-1/x_n^2) + // = x_n + (x_n - b x_n^2) + // = x_n (2 - b x_n) + let (_, e, m) = self.into_signed_parts(); + let inv_e = (-e + Self::EXP_BIAS).to_bits() - BUintD8::ONE; + //println!("{}", e); + let normalised = Self::from_raw_parts(false, Self::EXP_BIAS.to_bits() - BUintD8::ONE, m); + if normalised == Self::ONE { + return Self::from_raw_parts(self.is_sign_negative(), inv_e + 1, BUintD8::ZERO); + } + //println!("norm init: {:064b}", normalised.to_bits()); + let mut x_n = Self::from_bits( + (normalised * Self::HALF).to_bits() ^ (BUintD8::MAX >> (Self::BITS - Self::MB)), + ); + + let mut m_n = x_n.into_biased_parts().2 << 1; + + /* + 0.5 <= x_n < 1 + 1 <= normalised < 2 + so 0.5 <= x_n * normalised < 2 + 1 <= x_n * 2 < 2 + */ + + //println!("x_n: {}", x_n.to_f32()); + let mut iters = 0; + loop { + let a1 = x_n * Self::TWO; + let a2 = x_n * normalised * x_n; + let x_n_1 = a1 - a2; + assert!(a1.to_f32() >= 1.0 && a1.to_f32() <= 2.0); + assert!(a2.to_f32() >= 0.5 && a2.to_f32() <= 1.0); + + let ma1 = m_n << 1; + let xnf = x_n_1.to_f32(); + assert!( + 0.5 <= xnf && xnf < 1.0, + "{}, norm: {}", + xnf, + normalised.to_f32() + ); + // x_n * (2 - norm * x_n) + if x_n_1 == x_n || iters == 100 { + //println!("done: new: {}, old: {}", x_n_1.to_f64(), x_n.to_f64()); + //println!("norm: {:064b}", x_n_1.to_bits()); + let mut m = 
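// An illustrative sketch of the square-and-multiply loop used by the new `powi` above,
// written for plain f64 (the function name and the f64 stand-in are assumptions of this
// sketch). Taking n.unsigned_abs() first avoids overflow when n == i32::MIN, as in the
// code above.
fn powi_by_squaring(mut base: f64, n: i32) -> f64 {
    if n == 0 {
        return 1.0;
    }
    let mut n_abs = n.unsigned_abs();
    let mut acc = 1.0;
    while n_abs > 1 {
        if n_abs & 1 == 1 {
            acc *= base; // fold in the factor for an odd exponent bit
        }
        base *= base; // square once per binary digit of the exponent
        n_abs >>= 1;
    }
    // the loop stops at n_abs == 1, so one factor of `base` is still owed
    let result = base * acc;
    if n < 0 { 1.0 / result } else { result }
}
// e.g. powi_by_squaring(2.0, 10) == 1024.0 and powi_by_squaring(2.0, -3) == 0.125.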
x_n_1.into_biased_parts().2; + if m.bit(Self::MB) { + m ^= BUintD8::ONE << Self::MB; + } + let unnormalised = Self::from_raw_parts(self.is_sign_negative(), inv_e, m); + // println!("iters: {}", iters); + return unnormalised; + } + x_n = x_n_1; + iters += 1; + } + } + + #[cfg(test)] + pub(crate) fn to_f64(self) -> f64 { + f64::from_bits(self.to_bits().as_()) + } + + #[cfg(test)] + pub(crate) fn to_f32(self) -> f32 { + f32::from_bits(self.to_bits().as_()) + } + + #[inline] + pub fn recip2(self) -> Self + where + [(); W * 2]:, + { + Self::ONE / self + } +} + +#[cfg(test)] +mod tests { + + + /*#[test] + fn fmod() { + use super::super::F64; + let f1 = 0.0; + let f2 = f64::INFINITY; + //println!("{:064b}", ((-0.0f64).div_euclid(f2)).to_bits()); + let a = (F64::from(f1) * (F64::from(f2))).to_bits(); + let b = (f1 * (f2)).to_bits(); + /*println!("{:064b}", a); + println!("{:064b}", b);*/ + assert!(a == b.into()); + } + + quickcheck::quickcheck! { + fn qc_recip(u: u32) -> quickcheck::TestResult { + let f = f32::from_bits(u); + if !f.is_finite() || f >= 2.0 || f <= 1.0 { + return quickcheck::TestResult::discard(); + } + + let b1 = f.recip().to_bits(); + let b2 = super::super::F32::from(f).recip().to_f32().to_bits(); + return quickcheck::TestResult::from_bool(b1 == b2 || b1 + 1 == b2); + } + } + + #[test] + fn recip2() { + assert!((0.0 as ftest).to_bits().count_zeros() == 32); + use super::super::F32; + + let f1 = 1.7517333f32; //f32::from_bits(0b0_01111110_01001000000000000000000u32); + println!("{}", f1); + + println!("{:032b}", f1.recip().to_bits()); + println!("{:032b}", F32::from(f1).recip_internal().to_f32().to_bits()); + panic!("") + } + + test_bignum! { + function: ::recip(f: ftest), + skip: !f.is_finite() || f == 0.0 || f >= 2.0 || f <= 1.0 + } + + #[test] + fn recip_u8() { + let mut g = true; + let mut close = true; + for i in 1..=u8::MAX { + let u = 0b0_01111110_00000000000000000000000u32 | ((i as u32) << 15); + let f = f32::from_bits(u); + + let b1 = f.recip().to_bits(); + let b2 = super::super::F32::from(f) + .recip_internal() + .to_f32() + .to_bits(); + let eq = b1 == b2; + if !eq { + println!("{:08b}", i); + if b2 - b1 != 1 { + close = false; + } + } + if b1 > b2 { + if b1 > b2 { + g = false; + } + } + } + println!("all greater: {}", g); + println!("all close: {}", close); + panic!("") + }*/ +} \ No newline at end of file diff --git a/src/float/math/remquof.rs b/src/float/math/remquof.rs new file mode 100644 index 0000000..bd5f95e --- /dev/null +++ b/src/float/math/remquof.rs @@ -0,0 +1,103 @@ + + + /*pub fn remquof(mut self, mut y: Self) -> /*(Self, BIntD8)*/(Self, Self) { + handle_nan!(self; self); + handle_nan!(y; y); + if self.is_infinite() || y.is_infinite() { + return (Self::NAN, Self::NAN); + } + + if y.is_zero() { + return (Self::QNAN, Self::QNAN); + } + if self.is_zero() { + return (self, self); + } + let ux = self.to_bits(); + let mut uy = y.to_bits(); + let mut ex = self.exponent(); + let mut ey = y.exponent(); + let sx = self.is_sign_negative(); + let sy = y.is_sign_negative(); + let mut uxi = ux; + + /* normalize x and y */ + let mut i; + if ex.is_zero() { + i = uxi << (Self::BITS - Self::MB); + while !BIntD8::from_bits(i).is_negative() { + ex -= BIntD8::ONE; + i <<= 1u8; + } + uxi <<= -ex + BIntD8::ONE; + } else { + uxi &= BUintD8::MAX >> (Self::BITS - Self::MB); + uxi |= BUintD8::ONE << Self::MB; + } + if ey.is_zero() { + i = uy << (Self::BITS - Self::MB); + while !BIntD8::from_bits(i).is_negative() { + ey -= BIntD8::ONE; + i <<= 1u8; + } + uy <<= -ey + BIntD8::ONE; + } 
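// A standalone sketch of the Newton iteration derived in the comments of `recip_internal`
// above: x_{n+1} = x_n * (2 - b * x_n), which converges quadratically to 1/b. As in the
// code above, the input is assumed already normalised to [1, 2), so x_0 = 0.5 is a valid
// seed (any seed in (0, 2/b) converges); the name and the f64 stand-in are illustrative.
fn newton_recip_normalised(b: f64) -> f64 {
    assert!(1.0 <= b && b < 2.0);
    let mut x = 0.5;
    for _ in 0..6 {
        // each step roughly squares the error, so six steps exceed f64 precision
        x = x * (2.0 - b * x);
    }
    x
}
// e.g. newton_recip_normalised(1.6) equals 0.625 to within one unit in the last place.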
else { + uy &= BUintD8::MAX >> (Self::BITS - Self::MB); + uy |= BUintD8::ONE << Self::MB; + } + + let mut q = BUintD8::::ZERO; + if ex + BIntD8::ONE != ey { + if ex < ey { + return (self, 0); + } + /* x mod y */ + while ex > ey { + i = uxi.wrapping_sub(uy); + if !BIntD8::from_bits(i).is_negative() { + uxi = i; + q += BUintD8::ONE; + } + uxi <<= 1u8; + q <<= 1u8; + ex -= BIntD8::ONE; + } + i = uxi.wrapping_sub(uy); + if !BIntD8::from_bits(i).is_negative() { + uxi = i; + q += BUintD8::ONE; + } + if uxi.is_zero() { + //ex = BIntD8::TWO - BIntD8::from(Self::BITS); + ex = BIntD8::from(-60i8); + } else { + while (uxi >> Self::MB).is_zero() { + uxi <<= 1u8; + ex -= BIntD8::ONE; + } + } + } + + /* scale result and decide between |x| and |x|-|y| */ + if ex.is_positive() { + uxi -= BUintD8::ONE << Self::MB; + uxi |= ex.to_bits() << Self::MB; + } else { + uxi >>= -ex + BIntD8::ONE; + } + self = Self::from_bits(uxi); + if sy { + y = -y; + } + if ex == ey || (ex + BIntD8::ONE == ey && (Self::TWO * self > y || (Self::TWO * self == y && !(q % BUintD8::TWO).is_zero()))) { + self = self - y; + q += BUintD8::ONE; + } + q &= BUintD8::MAX >> 1u8; + let quo = if sx ^ sy { -BIntD8::from_bits(q) } else { BIntD8::from_bits(q) }; + if sx { + (-self, quo) + } else { + (self, quo) + } + }*/ \ No newline at end of file diff --git a/src/float/math/sqrt.rs b/src/float/math/sqrt.rs new file mode 100644 index 0000000..e43c521 --- /dev/null +++ b/src/float/math/sqrt.rs @@ -0,0 +1,79 @@ +use crate::{float::{FloatExponent, UnsignedFloatExponent}, BIntD8, BUintD8}; +use super::Float; + +impl Float { + pub(super) fn sqrt_internal(self) -> Self { + handle_nan!(self; self); + if self.is_zero() { + return self; + } + let bits = self.to_bits(); + if bits == Self::INFINITY.to_bits() { + return Self::INFINITY; + } + if self.is_sign_negative() { + return Self::NAN; + /*let u = BUintD8::MAX << (Self::MB - 1); + return Self::from_bits(u);*/ + } + + let tiny = Self::from_bits(BUintD8::from(0b11011u8) << Self::MB); // TODO: may not work for exponents stored with very few bits + + let mut ix = BIntD8::from_bits(bits); + let mut i: FloatExponent; + let mut m = (bits >> Self::MB).cast_to_unsigned_float_exponent() as FloatExponent; + if m == 0 { + /* subnormal x */ + i = 0; + while (ix & (BIntD8::ONE << Self::MB)).is_zero() { + ix <<= 1; + i = i + 1; + } + m -= i - 1; + } + m -= Self::EXP_BIAS; /* unbias exponent */ + ix = (ix & BIntD8::from_bits(BUintD8::MAX >> (Self::BITS - Self::MB))) + | (BIntD8::ONE << Self::MB); + if m & 1 == 1 { + /* odd m, double x to make it even */ + ix += ix; + } + m >>= 1; /* m = [m/2] */ + + /* generate sqrt(x) bit by bit */ + ix += ix; + let mut q = BIntD8::ZERO; + let mut s = BIntD8::ZERO; + let mut r = BUintD8::ONE << (Self::MB + 1); /* r = moving bit from right to left */ + + let mut t: BIntD8; + while !r.is_zero() { + t = s + BIntD8::from_bits(r); + if t <= ix { + s = t + BIntD8::from_bits(r); + ix -= t; + q += BIntD8::from_bits(r); + } + ix += ix; + r = r >> 1u8; + } + + /* use floating add to find out rounding direction */ + let mut z: Self; + if !ix.is_zero() { + z = Self::ONE - tiny; /* raise inexact flag */ + if z >= Self::ONE { + z = Self::ONE + tiny; + if z > Self::ONE { + q += BIntD8::TWO; + } else { + q += q & BIntD8::ONE; + } + } + } + + ix = (q >> 1u8) + BIntD8::from_bits((BUintD8::MAX << (Self::MB + 1 + 2)) >> 2u8); + ix += (BUintD8::cast_from_unsigned_float_exponent(m as UnsignedFloatExponent) << Self::MB).cast_signed(); + Self::from_bits(ix.to_bits()) + } +} \ No newline at end of file diff 
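// An illustrative integer version of the digit-recurrence ("bit by bit") square root that
// sqrt_internal above runs on the scaled mantissa, shown on u64 (names here are
// assumptions of this sketch). `bit` is a probe that walks down the even bit positions;
// `result` accumulates the root one bit per step.
fn isqrt_u64(n: u64) -> u64 {
    let mut remainder = n;
    let mut result = 0u64;
    let mut bit = 1u64 << 62; // highest power of four representable in u64
    while bit > n {
        bit >>= 2;
    }
    while bit != 0 {
        if remainder >= result + bit {
            remainder -= result + bit;
            result = (result >> 1) + bit; // this root bit is a 1
        } else {
            result >>= 1; // this root bit is a 0
        }
        bit >>= 2;
    }
    result
}
// e.g. isqrt_u64(144) == 12; sqrt_internal applies the same recurrence to the mantissa
// (pre-doubled when the exponent is odd) and uses the leftover remainder to round.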
--git a/src/float/mod.rs b/src/float/mod.rs index 86892b3..fd48b55 100644 --- a/src/float/mod.rs +++ b/src/float/mod.rs @@ -1,7 +1,5 @@ -use crate::bint::BIntD8; -use crate::cast::{As, CastFrom}; -use crate::digit::u8 as digit; -use crate::{BUintD8, ExpType}; +use crate::{BUintD8, BIntD8, ExpType}; +use crate::doc; type Digit = u8; @@ -11,6 +9,22 @@ pub type F64 = Float<8, 52>; #[cfg(test)] pub type F32 = Float<4, 23>; +#[cfg(test)] +impl From for F64 { + #[inline] + fn from(f: f64) -> Self { + Self::from_bits(f.to_bits().into()) + } +} + +#[cfg(test)] +impl From for F32 { + #[inline] + fn from(f: f32) -> Self { + Self::from_bits(f.to_bits().into()) + } +} + macro_rules! handle_nan { ($ret: expr; $($n: expr), +) => { if $($n.is_nan()) || + { @@ -23,15 +37,24 @@ mod cast; mod classify; mod cmp; mod consts; +mod const_trait_fillers; mod convert; mod endian; mod math; +#[cfg(feature = "numtraits")] +mod numtraits; mod ops; +// mod parse; +#[cfg(feature = "rand")] +mod random; +mod rounding; mod to_str; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +// TODO: THINK ABOUT MAKING FLOAT EXPONENT AT MOST ~128 BITS, THEN COULD USE I128 FOR EXPONENT CALCULATIONS, WOULD BE MUCH FASTER AND USE LESS SPACE + #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[repr(transparent)] @@ -39,6 +62,9 @@ pub struct Float { bits: BUintD8, } +pub(crate) type FloatExponent = i128; +pub(crate) type UnsignedFloatExponent = u128; + // TODO: implement rand traits impl Float { @@ -47,106 +73,80 @@ impl Float { const EXPONENT_BITS: ExpType = Self::BITS - Self::MB - 1; - /*const MANTISSA_WORDS: (usize, usize) = (MB / digit::BITS as usize, MB % digit::BITS as usize); - - const EXPONENT_MASK: BUintD8 = BUintD8::MAX.wrapping_shl(Self::MB) ^ BIntD8::MIN.to_bits();*/ + const MANTISSA_MASK: BUintD8 = BUintD8::MAX.wrapping_shr(Self::EXPONENT_BITS + 1); - pub fn parse(digits: &[u8], exp: i32) -> Self where [u8; W * 2]: { - let one = Self::ONE; - let two = Self::TWO; - let three: Self = 3u8.as_(); - let four: Self = 4u8.as_(); - let five: Self = 5u8.as_(); - let six: Self = 6u8.as_(); - let seven: Self = 7u8.as_(); - let eight: Self = 8u8.as_(); - let nine: Self = 9u8.as_(); - let ten: Self = 10u8.as_(); + const SIGN_MASK: BUintD8 = BIntD8::MAX.to_bits(); - let mut out = Self::ZERO; - let mut pow = Self::ONE; - for d in digits { - let a = Self::cast_from(*d) * pow; + const MANTISSA_IMPLICIT_LEADING_ONE_MASK: BUintD8 = BUintD8::ONE.shl(Self::MB); +} - out = out + a; - pow = pow / ten; +impl BUintD8 { + #[inline] + pub(crate) const fn cast_from_unsigned_float_exponent(mut exp: UnsignedFloatExponent) -> Self { + let mut out = Self::MIN; + let mut i = 0; + while exp != 0 && i < W { + let masked = exp as Digit & Digit::MAX; + out.digits[i] = masked; + if UnsignedFloatExponent::BITS <= Digit::BITS { + exp = 0; + } else { + exp = exp.wrapping_shr(Digit::BITS); + } + i += 1; } - out.powi(exp) + out } -} - -#[test] -fn test_parse() { - use core::str::FromStr; - let digits = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8]; - let parsed = F32::parse(&digits, 0); - let mut s = unsafe { String::from_utf8_unchecked(digits.into_iter().map(|d| d + 48).collect()) }; - s.insert(1, '.'); - s.push_str(""); - println!("{}", s); - println!("{:032b}", parsed.to_bits()); - println!("{:032b}", f32::from_str(&s).unwrap().to_bits()); - println!("F: {}", parsed.to_f32()); - println!("f: {}", f32::from_str(&s).unwrap()); - println!("n: {}", ::from_str_radix(&s, 10).unwrap()); + #[inline] + pub(crate) const fn 
cast_to_unsigned_float_exponent(self) -> UnsignedFloatExponent { + let mut out = 0; + let mut i = 0; + while i << crate::digit::u8::BIT_SHIFT < UnsignedFloatExponent::BITS as usize && i < W { + out |= (self.digits[i] as UnsignedFloatExponent) << (i << crate::digit::u8::BIT_SHIFT); + i += 1; + } + out + } } impl Float { - #[inline(always)] - const fn from_words(words: [Digit; W]) -> Self { - Self::from_bits(BUintD8::from_digits(words)) - } - - #[inline(always)] - const fn words(&self) -> &[Digit; W] { - &self.bits.digits + const MIN_EXP_MINUS_ONE: FloatExponent = Self::MIN_EXP - 1; + // TODO: write test for this + /// generate powers of two that are not subnormal + const fn normal_power_of_two(exponent: FloatExponent) -> Self { + debug_assert!(exponent >= Self::MIN_EXP_MINUS_ONE); + debug_assert!(exponent < Self::MAX_EXP); + let biased_exponent = exponent + Self::EXP_BIAS; + let exponent_bits = BUintD8::cast_from_unsigned_float_exponent(biased_exponent as UnsignedFloatExponent); + let float_bits = exponent_bits.shl(Self::MB); + Self::from_bits(float_bits) } +} +impl Float { + #[doc = doc::signum!(F)] + #[must_use = doc::must_use_op!(float)] #[inline] - const fn exponent(self) -> BIntD8 { - BIntD8::from_bits(self.exp_mant().0) - } - - /*const fn actual_exponent(self) -> BIntD8 { - self.exponent() - Self::EXP_BIAS - } - const fn unshifted_exponent(self) -> BIntD8 { - BIntD8::from_bits(self.to_bits() & Self::EXPONENT_MASK) - }*/ - const MANTISSA_MASK: BUintD8 = BUintD8::MAX.wrapping_shr(Self::EXPONENT_BITS + 1); - /*const fn mantissa(self) -> BUintD8 { - self.to_bits() & Self::MANTISSA_MASK - } - const fn actual_mantissa(self) -> BUintD8 { - if self.is_subnormal() { - self.mantissa() - } else { - self.mantissa() | (BUintD8::ONE.wrapping_shl(MB)) - } - }*/ - #[inline(always)] - const fn to_int(self) -> BIntD8 { - BIntD8::from_bits(self.to_bits()) + pub const fn signum(self) -> Self { + handle_nan!(Self::NAN; self); + Self::ONE.copysign(self) } + #[doc = doc::copysign!(F)] + #[must_use = doc::must_use_op!(float)] #[inline] pub const fn copysign(self, sign: Self) -> Self { let mut self_words = *self.words(); if sign.is_sign_negative() { - self_words[W - 1] |= 1 << (digit::BITS - 1); + self_words[W - 1] |= 1 << (Digit::BITS - 1); } else { self_words[W - 1] &= (!0) >> 1; } Self::from_bits(BUintD8::from_digits(self_words)) } - #[inline] - pub const fn signum(self) -> Self { - handle_nan!(Self::NAN; self); - Self::ONE.copysign(self) - } - + #[doc = doc::next_up!(F)] #[inline] pub const fn next_up(self) -> Self { use core::num::FpCategory; @@ -171,6 +171,7 @@ impl Float { } } + #[doc = doc::next_down!(F)] #[inline] pub const fn next_down(self) -> Self { use core::num::FpCategory; @@ -203,108 +204,10 @@ impl Default for Float { } } -impl Float { - // split into sign, exponent and mantissa - #[inline] - const fn to_raw_parts(self) -> (bool, BUintD8, BUintD8) { - let sign = self.is_sign_negative(); - let exp = self.bits.bitand(BIntD8::::MAX.to_bits()).shr(Self::MB); - let mant = self.bits.bitand(Self::MANTISSA_MASK); - - (sign, exp, mant) - } - - // split into sign, exponent and mantissa and adjust to reflect actual numerical represenation, but without taking exponent bias into account - #[inline] - const fn to_parts_biased(self) -> (bool, BUintD8, BUintD8) { - let (sign, exp, mant) = self.to_raw_parts(); - if exp.is_zero() { - (sign, BUintD8::ONE, mant) - } else { - (sign, exp, mant.bitor(BUintD8::ONE.shl(Self::MB))) - } - } - - /*// split into sign, exponent and mantissa and adjust to reflect actual 
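// A minimal f64 illustration of what `normal_power_of_two` above constructs: a normal
// power of two has an all-zero mantissa field, so writing the biased exponent into the
// exponent field is enough. The function name, the f64 stand-in and the constants
// 1023 and 52 (f64's exponent bias and mantissa width) are assumptions of this sketch.
fn f64_power_of_two(exponent: i32) -> f64 {
    // f64::MIN_EXP - 1 = -1022 and f64::MAX_EXP - 1 = 1023 bound the normal range,
    // mirroring the debug_asserts in the code above
    assert!(-1022 <= exponent && exponent <= 1023);
    let biased = (exponent + 1023) as u64;
    f64::from_bits(biased << 52)
}
// e.g. f64_power_of_two(0) == 1.0, f64_power_of_two(-1) == 0.5, f64_power_of_two(10) == 1024.0.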
numerical represenation - #[inline] - const fn to_parts(self) -> (bool, BIntD8, BUintD8) { - let (sign, exp, mant) = self.to_parts_biased(); - (sign, BIntD8::from_bits(exp).sub(Self::EXP_BIAS), mant) - }*/ - - // construct float from sign, exponent and mantissa - #[inline] - const fn from_raw_parts(sign: bool, exp: BUintD8, mant: BUintD8) -> Self { - debug_assert!( - exp.shl(Self::MB).bitand(mant).is_zero(), - "mantissa and exponent overlap" - ); - let mut bits = exp.shl(Self::MB).bitor(mant); - if sign { - bits.digits[W - 1] |= 1 << (digit::BITS - 1); - } - Self::from_bits(bits) - } - - /*// construct float from sign, exponent and mantissa, adjusted to reflect actual numerical representation - #[inline] - const fn from_parts(sign: bool, exp: BIntD8, mant: BUintD8) -> Self { - let exp = exp.add(Self::EXP_BIAS); - todo!() - }*/ - - #[inline] - const fn exp_mant(&self) -> (BUintD8, BUintD8) { - let bits = self.bits; - let exp = (bits.shl(1)).shr(Self::MB + 1); - let mant = bits.bitand(Self::MANTISSA_MASK); - - if exp.is_zero() { - (BUintD8::ONE, mant) - } else { - (exp, mant.bitor(BUintD8::ONE.shl(Self::MB))) - } - } - - /*#[inline] - pub(super) const fn decode(self) -> (BUintD8, BIntD8) { - let bits = self.bits; - let exp = (bits.shl(1)).shr(Self::MB + 1); - let mant = if exp.is_zero() { - (bits.bitand(Self::MANTISSA_MASK)).shl(1) - } else { - (bits.bitand(Self::MANTISSA_MASK)).bitor((BUintD8::power_of_two(Self::MB))) - }; - let exp = BIntD8::from_bits(exp) - .sub(Self::EXP_BIAS) - .add(MB.as_::>()); - (mant, exp) - }*/ - - #[inline] - const fn from_exp_mant(negative: bool, exp: BUintD8, mant: BUintD8) -> Self { - let mut bits = (exp.shl(Self::MB)).bitor(mant); - if negative { - bits = bits.bitor(BIntD8::MIN.to_bits()); - } - let f = Self::from_bits(bits); - f - } -} - -#[cfg(test)] -impl From for F64 { - #[inline] - fn from(f: f64) -> Self { - Self::from_bits(f.to_bits().into()) - } -} - -#[cfg(test)] -impl From for F32 { - #[inline] - fn from(f: f32) -> Self { - Self::from_bits(f.to_bits().into()) +#[cfg(any(test, feature = "quickcheck"))] +impl quickcheck::Arbitrary for crate::Float { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + Self::from_bits(BUintD8::arbitrary(g)) } } @@ -314,162 +217,15 @@ mod tests { use crate::test::types::{ftest, FTEST}; test_bignum! { - function: ::copysign(f1: ftest, f2: ftest) + function: ::copysign(a: ftest, b: ftest) } - test_bignum! { - function: ::signum(f: ftest) + function: ::signum(a: ftest) } - test_bignum! { - function: ::next_up(f: ftest) + function: ::next_up(a: ftest) } - test_bignum! 
{ - function: ::next_down(f: ftest) + function: ::next_down(a: ftest) } -} - - -// TODO: create round-to-nearest ties-to-even function, it could take a uint and a target bit width, and return the correctly rounded result in the target precision, as well as the overflow, and whether a round up occurred -// #[allow(unused)] -// fn f64_as_f32(f: f64) -> f32 { -// if f.is_infinite() { -// return if f.is_sign_negative() { -// f32::NEG_INFINITY -// } else { -// f32::INFINITY -// }; -// } -// if f == 0.0 && f.is_sign_positive() { -// return 0.0; -// } -// if f == 0.0 && f.is_sign_negative() { -// return -0.0; -// } -// let bits = f.to_bits(); -// let mut mant = bits & 0xfffffffffffff; -// let mut exp = ((bits & (i64::MAX as u64)) >> 52) as i32; -// if exp != 0 { -// mant |= 0x10000000000000; - -// } else { -// exp = 1; -// } -// exp -= 1023; -// //println!("exp: {}", exp); -// let mut mantissa_shift = 52 - 23; -// /*if mant.leading_zeros() != 64 - (52 + 1) { -// exp -// }*/ -// if exp >= f32::MAX_EXP { -// return if f.is_sign_negative() { -// f32::NEG_INFINITY -// } else { -// f32::INFINITY -// }; -// } -// if exp < f32::MIN_EXP - 1 { -// let diff = (f32::MIN_EXP - 1) - exp; -// mantissa_shift += diff; -// exp = -(f32::MAX_EXP - 1); -// } -// let new_mant = mant.checked_shr(mantissa_shift as u32).unwrap_or(0); -// //println!("{:025b}", new_mant); - -// let shifted_back = new_mant.checked_shl(mantissa_shift as u32).unwrap_or(0); -// let overflow = mant ^ shifted_back; -// /*println!("overflow: {:029b}", overflow); -// println!("mant: {:053b}", mant); -// println!("shbk: {:053b}", shifted_back); -// println!("lz: {}", overflow.leading_zeros());*/ -// if overflow.leading_zeros() as i32 == 64 - mantissa_shift { // this means there is a one at the overflow bit -// if overflow.count_ones() == 1 { // this means the overflowing is 100...00 so apply ties-to-even rounding -// if new_mant & 1 == 1 { // if the last bit is 1, then we round up -// mant = new_mant + 1; -// //println!("updated mant: {:025b}", mant); -// } else { // otherwise we round down -// mant = new_mant; -// } -// } else { -// mant = new_mant + 1; // round up -// } -// } else { -// mant = new_mant; -// } -// //1111111111111111111111111 -// //111111111111111111111111 -// if mant.leading_zeros() < 64 - (23 + 1) { -// // println!("mant overflow"); -// mant >>= 1; -// exp += 1; -// } -// if exp > f32::MAX_EXP { -// return if f.is_sign_negative() { -// f32::NEG_INFINITY -// } else { -// f32::INFINITY -// }; -// } -// mant ^= 0x800000; -// let sign = (bits >> 63) as u32; -// let exp = (exp + (f32::MAX_EXP - 1)) as u32; -// let mant = mant as u32; -// let bits = (sign << 31) | (exp << 23) | mant; -// f32::from_bits(bits) -// } - -// #[cfg(test)] -// quickcheck::quickcheck! 
{ -// fn qc_test_f64_as_f32(f: f64) -> quickcheck::TestResult { -// if !f.is_finite() { -// return quickcheck::TestResult::discard(); -// } -// let f2 = f64_as_f32(f); -// let f3 = f as f32; -// quickcheck::TestResult::from_bool(f2 == f3) -// } -// } - -// type U32 = BUintD32::<1>; -// fn parse(s: &str) -> (types::U128, U32) { -// let mut radix = 10; -// let mut custom_radix = false; -// let mut src = s; -// let bytes = s.as_bytes(); -// let len = bytes.len(); -// let mut first_char_zero = false; -// let mut bit_width = U32::power_of_two(7); -// let mut i = 0; -// while i < len { -// let byte = bytes[i]; -// if i == 0 && byte == b'0' { -// first_char_zero = true; -// } else if i == 1 && first_char_zero && (byte == b'b' || byte == b'o' || byte == b'x') { -// let ptr = unsafe { src.as_ptr().add(2) }; -// let new = core::ptr::slice_from_raw_parts(ptr, src.len() - 2); -// src = unsafe { &*(new as *const str) }; -// radix = match byte { -// b'b' => 2, -// b'o' => 8, -// b'x' => 16, -// _ => unreachable!(), -// }; -// custom_radix = true; -// } -// if i != 0 && i != len - 1 && byte == b'u' { -// let old_len = src.len(); -// let ptr = src.as_ptr(); - -// let new_len = if custom_radix { i - 2 } else { i }; -// let bit_width_ptr = core::ptr::slice_from_raw_parts(unsafe { ptr.add(new_len + 1) }, old_len - new_len - 1); -// let new = core::ptr::slice_from_raw_parts(ptr, new_len); -// src = unsafe { &*(new as *const str) }; -// let bit_width_str = unsafe { &*(bit_width_ptr as *const str) }; -// bit_width = U32::parse_str_radix(bit_width_str, 10); -// break; -// } -// i += 1; -// } -// (types::U128::parse_str_radix(src, radix), bit_width) -// } \ No newline at end of file +} \ No newline at end of file diff --git a/src/float/numtraits.rs b/src/float/numtraits.rs new file mode 100644 index 0000000..0d3bcea --- /dev/null +++ b/src/float/numtraits.rs @@ -0,0 +1,77 @@ +use super::Float; +use num_traits::{Bounded, ConstZero, ConstOne, One, Zero, Signed, Euclid}; + +impl Bounded for Float { + #[inline] + fn min_value() -> Self { + Self::MIN + } + + #[inline] + fn max_value() -> Self { + Self::MAX + } +} + +impl ConstZero for Float { + const ZERO: Self = Self::ZERO; +} + +impl ConstOne for Float { + const ONE: Self = Self::ONE; +} + +impl Zero for Float { + #[inline] + fn zero() -> Self { + Self::ZERO + } + + #[inline] + fn is_zero(&self) -> bool { + Self::is_zero(&self) + } +} + +impl One for Float { + #[inline] + fn one() -> Self { + Self::ONE + } + + #[inline] + fn is_one(&self) -> bool { + Self::ONE.eq(&self) + } +} + +// impl Signed for Float { +// #[inline] +// fn is_negative(&self) -> bool { +// Self::is_sign_negative(*self) +// } + +// #[inline] +// fn is_positive(&self) -> bool { +// Self::is_sign_positive(*self) +// } + +// #[inline] +// fn abs(&self) -> Self { +// Self::abs(*self) +// } + +// #[inline] +// fn abs_sub(&self, other: &Self) -> Self { +// if self <= other { +// Self::ZERO +// } else { +// *self - *other +// } +// } + +// #[inline] +// fn signum(&self) -> Self { +// Self::signum(*self) +// } +// } \ No newline at end of file diff --git a/src/float/ops.rs b/src/float/ops.rs deleted file mode 100644 index b529bb3..0000000 --- a/src/float/ops.rs +++ /dev/null @@ -1,810 +0,0 @@ -use super::Float; -use crate::cast::As; -use crate::{BIntD8, BUintD8, ExpType}; -use core::iter::{Iterator, Product, Sum}; -use core::num::FpCategory; -use core::ops::{Add, Div, Mul, Neg, Rem, Sub}; - -type Digit = u8; - -// TODO: fix this function -impl Float { - #[inline] - fn add_internal(mut self, mut rhs: Self, 
negative: bool) -> Self { - //println!("{:?} {:?}", self, rhs); - //debug_assert_eq!(self.is_sign_negative(), rhs.is_sign_negative()); - if rhs.abs() > self.abs() { - // If b has a larger exponent than a, swap a and b so that a has the larger exponent - core::mem::swap(&mut self, &mut rhs); - } - let (_, mut a_exp, mut a_mant) = self.to_parts_biased(); - let (_, b_exp, mut b_mant) = rhs.to_parts_biased(); - - let exp_diff = a_exp - b_exp; - - let sticky_bit = BUintD8::from(b_mant.trailing_zeros() + 1) < exp_diff; - - // Append extra bits to the mantissas to ensure correct rounding - a_mant <<= 2 as ExpType; - b_mant <<= 2 as ExpType; - - // If the shift causes an overflow, the b_mant is too small so is set to 0 - b_mant = match exp_diff.try_into().ok() { - Some(exp_diff) => b_mant.checked_shr(exp_diff).unwrap_or(BUintD8::ZERO), - None => BUintD8::ZERO, - }; - - if sticky_bit { - b_mant |= BUintD8::ONE; // round up - } - - let mut mant = a_mant + b_mant; - - let overflow = !(mant >> (MB + 3)).is_zero(); - if !overflow { - if mant & BUintD8::from_digit(0b11) == BUintD8::from_digit(0b11) - || mant & BUintD8::from_digit(0b110) == BUintD8::from_digit(0b110) - { - mant += BUintD8::FOUR; - if !(mant >> (MB + 3)).is_zero() { - mant >>= 1 as ExpType; - a_exp += BUintD8::ONE; - } - } - } else { - match (mant & BUintD8::from_digit(0b111)).digits()[0] { - 0b111 | 0b110 | 0b101 => { - mant += BUintD8::EIGHT; - } - 0b100 => { - if mant & BUintD8::from_digit(0b1000) == BUintD8::from_digit(0b1000) { - mant += BUintD8::EIGHT; // 0b1000 - } - } - _ => {} - } - - mant >>= 1 as ExpType; - a_exp += BUintD8::ONE; - } - if a_exp > Self::MAX_UNBIASED_EXP { - return if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - }; - } - - mant >>= 2 as ExpType; - - if (mant >> Self::MB).is_zero() { - a_exp = BUintD8::ZERO; - } else { - mant ^= BUintD8::ONE << Self::MB; - } - let a = Self::from_raw_parts(negative, a_exp, mant); - a - } -} - -impl Add for Float { - type Output = Self; - - #[inline] - fn add(self, rhs: Self) -> Self { - let self_negative = self.is_sign_negative(); - let rhs_negative = rhs.is_sign_negative(); - let a = match (self.classify(), rhs.classify()) { - (FpCategory::Nan, _) => self, - (_, FpCategory::Nan) => rhs, - (FpCategory::Infinite, FpCategory::Infinite) => { - if self_negative ^ rhs_negative { - Self::NAN - } else { - self - } - } - (FpCategory::Infinite, _) => self, - (_, FpCategory::Infinite) => rhs, - (FpCategory::Zero, FpCategory::Zero) => { - if self_negative && rhs_negative { - Self::NEG_ZERO - } else { - Self::ZERO - } - } - (_, _) => { - if self_negative ^ rhs_negative { - self.sub_internal(rhs, self_negative) - } else { - let r = self.add_internal(rhs, self_negative); - r - } - } - }; - //assert_eq!(a.to_bits(), (self.to_f64() + rhs.to_f64()).to_bits().into()); - a - } -} - -//crate::errors::op_ref_impl!(Add> for Float, add); - -impl Sum for Float { - #[inline] - fn sum>(iter: I) -> Self { - iter.fold(Self::ZERO, |a, b| a + b) - } -} - -impl<'a, const W: usize, const MB: usize> Sum<&'a Self> for Float { - #[inline] - fn sum>(iter: I) -> Self { - iter.fold(Self::ONE, |a, b| a + *b) - } -} - -impl Float { - #[inline] - fn sub_internal(mut self, mut rhs: Self, mut negative: bool) -> Self { - if rhs.abs() > self.abs() { - // If b has a larger exponent than a, swap a and b so that a has the larger exponent - negative = !negative; - core::mem::swap(&mut self, &mut rhs); - } - if self.abs() == rhs.abs() { - return Self::ZERO; - } - - let (_, a_exp, mut a_mant) = 
self.to_parts_biased(); - let (_, b_exp, mut b_mant) = rhs.to_parts_biased(); - let exp_diff = a_exp - b_exp; - - let mut a_exp = BIntD8::from_bits(a_exp); - - let sticky_bit2 = !exp_diff.is_zero() - && exp_diff < BUintD8::::BITS.into() - && b_mant.bit(exp_diff.as_::() - 1); - let all_zeros = - !exp_diff.is_zero() && b_mant.trailing_zeros() + 1 == exp_diff.as_::(); - - // Append extra bits to the mantissas to ensure correct rounding - a_mant = a_mant << 1 as ExpType; - b_mant = b_mant << 1 as ExpType; - - let sticky_bit = b_mant.trailing_zeros() < exp_diff.as_(); - - // If the shift causes an overflow, the b_mant is too small so is set to 0 - let shifted_b_mant = match exp_diff.try_into().ok() { - Some(exp_diff) => b_mant.checked_shr(exp_diff).unwrap_or(BUintD8::ZERO), - None => BUintD8::ZERO, - }; - - // If the shift causes an overflow, the b_mant is too small so is set to 0 - - if sticky_bit { - //b_mant |= 1; - } - - let mut mant = a_mant - shifted_b_mant; - - if mant.bits() == Self::MB + 2 { - if mant & BUintD8::from(0b10u8) == BUintD8::from(0b10u8) && !sticky_bit { - mant += BUintD8::ONE; - } - - mant >>= 1 as ExpType; - } else { - a_exp -= BIntD8::ONE; - a_mant <<= 1 as ExpType; - b_mant <<= 1 as ExpType; - - let sticky_bit = b_mant.trailing_zeros() < exp_diff.as_(); - - // If the shift causes an overflow, the b_mant is too small so is set to 0 - let shifted_b_mant = match exp_diff.try_into().ok() { - Some(exp_diff) => b_mant.checked_shr(exp_diff).unwrap_or(BUintD8::ZERO), - None => BUintD8::ZERO, - }; - - // If the shift causes an overflow, the b_mant is too small so is set to 0 - - if sticky_bit { - //b_mant |= 1; - } - - mant = a_mant - shifted_b_mant; - - if mant.bits() == Self::MB + 2 { - if mant & BUintD8::from(0b10u8) == BUintD8::from(0b10u8) && !sticky_bit { - mant += BUintD8::ONE; - } - - mant >>= 1 as ExpType; - } else { - let _half_way = (); // TODO - //println!("sticky: {}", sticky_bit); - if sticky_bit2 && !all_zeros - || (sticky_bit2 - && all_zeros - && b_mant & BUintD8::from(0b1u8) == BUintD8::from(0b1u8)) - { - //println!("sub"); - mant -= BUintD8::ONE; - } - let bits = mant.bits(); - mant <<= Self::MB + 1 - bits; - a_exp -= (MB as i64 + 2 - bits as i64).as_::>(); - if !a_exp.is_positive() { - a_exp = BIntD8::ONE; - mant >>= BIntD8::ONE - a_exp; - } - } - } - - if (mant >> Self::MB).is_zero() { - a_exp = BIntD8::ZERO; - } else { - mant ^= BUintD8::ONE << Self::MB; - } - - Self::from_raw_parts(negative, a_exp.to_bits(), mant) - } -} - -impl Sub for Float { - type Output = Self; - - #[inline] - fn sub(self, rhs: Self) -> Self { - //println!("{:064b} {:064b}", self.to_bits(), rhs.to_bits()); - match (self.classify(), rhs.classify()) { - (FpCategory::Nan, _) => self, - (_, FpCategory::Nan) => rhs, - (FpCategory::Infinite, FpCategory::Infinite) => { - match (self.is_sign_negative(), rhs.is_sign_negative()) { - (true, false) => Self::NEG_INFINITY, - (false, true) => Self::INFINITY, - _ => Self::NAN, - } - } - (FpCategory::Infinite, _) => self, - (_, FpCategory::Infinite) => rhs.neg(), - (_, _) => { - let self_negative = self.is_sign_negative(); - let rhs_negative = rhs.is_sign_negative(); - if self_negative ^ rhs_negative { - self.add_internal(rhs, self_negative) - } else { - self.sub_internal(rhs, self_negative) - } - } - } - } -} - -//crate::errors::op_ref_impl!(Sub> for Float, sub); - -impl Float { - #[inline] - fn _mul_internal_old(self, rhs: Self, negative: bool) -> Self - where - [(); W * 2]:, - { - let (a, b) = (self, rhs); - let (exp_a, mant_a) = a.exp_mant(); - let 
(exp_b, mant_b) = b.exp_mant(); - - // TODO: make so as_ can infer type so can switch trait definition if needed - let mant_prod = mant_a.as_::>() * mant_b.as_::>(); - - let prod_bits = mant_prod.bits(); - - if prod_bits == 0 { - return if negative { Self::NEG_ZERO } else { Self::ZERO }; - } - - let extra_bits = if prod_bits > (Self::MB + 1) { - prod_bits - (Self::MB + 1) - } else { - 0 - }; - - let mut exp = - BIntD8::from_bits(exp_a) + BIntD8::from_bits(exp_b) + BIntD8::from(extra_bits) - - Self::EXP_BIAS - - BIntD8::from(MB); - - if exp > Self::MAX_EXP + Self::EXP_BIAS - BIntD8::ONE { - //println!("rhs: {}", rhs.to_bits()); - return if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - }; - } - - let mut extra_shift = BUintD8::ZERO; - if !exp.is_positive() { - extra_shift = (BIntD8::ONE - exp).to_bits(); - exp = BIntD8::ONE; - } - let total_shift = BUintD8::from(extra_bits) + extra_shift; - - let sticky_bit = BUintD8::from(mant_prod.trailing_zeros() + 1) < total_shift; - let mut mant = match (total_shift - BUintD8::ONE).to_exp_type() { - Some(sub) => mant_prod.checked_shr(sub).unwrap_or(BUintD8::ZERO), - None => BUintD8::ZERO, - }; - if mant & BUintD8::ONE == BUintD8::ONE { - if sticky_bit || mant & BUintD8::from(0b11u8) == BUintD8::from(0b11u8) { - // Round up - mant += BUintD8::ONE; - } - } - mant >>= 1 as ExpType; - - if exp == BIntD8::ONE && mant.bits() < Self::MB + 1 { - return Self::from_exp_mant(negative, BUintD8::ZERO, mant.as_()); - } - if mant >> Self::MB != BUintD8::ZERO { - mant ^= BUintD8::ONE << Self::MB; - } - Self::from_exp_mant(negative, exp.to_bits(), mant.as_()) - } - - #[inline] - fn mul_internal(self, rhs: Self, negative: bool) -> Self { - let (a, b) = (self, rhs); - let (_, exp_a, mant_a) = a.to_parts_biased(); - let (_, exp_b, mant_b) = b.to_parts_biased(); - - // TODO: make so as_ can infer type so can switch trait definition if needed - let mut mant_prod = mant_a.widening_mul(mant_b); - - let prod_bits = if mant_prod.1.bits() == 0 { - mant_prod.0.bits() - } else { - mant_prod.1.bits() + Self::BITS - }; - - if prod_bits == 0 { - return if negative { Self::NEG_ZERO } else { Self::ZERO }; - } - - let extra_bits = if prod_bits > (Self::MB + 1) { - prod_bits - (Self::MB + 1) - } else { - 0 - }; - - let mut exp = - BIntD8::from_bits(exp_a) + BIntD8::from_bits(exp_b) + BIntD8::from(extra_bits) - - Self::EXP_BIAS - - BIntD8::from(MB); - - if exp > Self::MAX_EXP + Self::EXP_BIAS - BIntD8::ONE { - //println!("rhs: {}", rhs.to_bits()); - return if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - }; - } - - let mut extra_shift = BUintD8::ZERO; - if !exp.is_positive() { - extra_shift = (BIntD8::ONE - exp).to_bits(); - exp = BIntD8::ONE; - } - let total_shift = BUintD8::from(extra_bits) + extra_shift; - - let mp0tz = mant_prod.0.trailing_zeros(); - let tz = if mp0tz == Self::BITS { - mant_prod.1.trailing_zeros() + Self::BITS - } else { - mp0tz - }; - - let sticky_bit = BUintD8::from(tz + 1) < total_shift; - let mut mant = match (total_shift - BUintD8::ONE).to_exp_type() { - Some(sub) => { - if sub > Self::BITS * 2 { - (BUintD8::ZERO, BUintD8::ZERO) - } else if sub >= Self::BITS { - (mant_prod.1 >> (sub - Self::BITS), BUintD8::ZERO) - } else { - let mask = BUintD8::MAX >> (Self::BITS - sub); - let carry = mant_prod.1 & mask; - mant_prod.1 >>= sub; - mant_prod.0 = (mant_prod.0 >> sub) | (carry << (Self::BITS - sub)); - mant_prod - } - } - None => (BUintD8::ZERO, BUintD8::ZERO), - }; - if mant.0.bit(0) { - if sticky_bit || mant.0.bit(1) { - // Round up - 
let (sum, carry) = mant.0.overflowing_add(BUintD8::ONE); - mant.0 = sum; - if carry { - mant.1 += BUintD8::ONE; - } - } - } - { - let carry = mant.1.bit(0); - //mant.1 >>= 1 as ExpType; - mant.0 >>= 1 as ExpType; - if carry { - mant.0 |= BIntD8::MIN.to_bits(); - } - } - - let mut m1b = mant.1.bits(); - if m1b != 0 { - m1b -= 1; - } - /*let bits = if m1b == 0 { - mant.0.bits() - } else { - m1b + Self::BITS - };*/ - let m0b = mant.0.bits(); - if m0b > Self::MB + 1 { - // it's possible that the mantissa has too many bits, so shift it right and increase the exponent until it has the correct number of bits - let inc = m0b - (Self::MB + 1); - mant.0 = mant.0 >> inc; - exp += BIntD8::from(inc); - } - - if exp > Self::MAX_EXP + Self::EXP_BIAS - BIntD8::ONE { - return if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - }; - } - - if exp == BIntD8::ONE && m1b < Self::MB + 1 { - return Self::from_raw_parts(negative, BUintD8::ZERO, mant.0); - } - //if mant >> Self::MB != BUintD8::ZERO { - mant.0 ^= BUintD8::ONE << Self::MB; - //} - Self::from_raw_parts(negative, exp.to_bits(), mant.0) - } -} - -impl Mul for Float { - type Output = Self; - - #[inline] - fn mul(self, rhs: Self) -> Self { - let negative = self.is_sign_negative() ^ rhs.is_sign_negative(); - match (self.classify(), rhs.classify()) { - (FpCategory::Nan, _) | (_, FpCategory::Nan) => return Self::NAN, - (FpCategory::Infinite, FpCategory::Zero) | (FpCategory::Zero, FpCategory::Infinite) => { - Self::NAN - } - (FpCategory::Infinite, _) | (_, FpCategory::Infinite) => { - if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - } - } - (_, _) => self.mul_internal(rhs, negative), - } - } -} - -impl Product for Float -where - [(); W * 2]:, -{ - #[inline] - fn product>(iter: I) -> Self { - iter.fold(Self::ONE, |a, b| a * b) - } -} - -impl<'a, const W: usize, const MB: usize> Product<&'a Self> for Float -where - [(); W * 2]:, -{ - #[inline] - fn product>(iter: I) -> Self { - iter.fold(Self::ONE, |a, b| a * *b) - } -} - -impl Float { - #[inline] - fn div_internal(self, rhs: Self, negative: bool) -> Self - where - [(); W * 2]:, - { - let (a, b) = (self, rhs); - let (_, e1, s1) = a.to_parts_biased(); - let (_, e2, s2) = b.to_parts_biased(); - - let b1 = s1.bits(); - let b2 = s2.bits(); - - let mut e = - BIntD8::from_bits(e1) - BIntD8::from_bits(e2) + Self::EXP_BIAS + BIntD8::from(b1) - - BIntD8::from(b2) - - BIntD8::ONE; - - let mut extra_shift = BUintD8::ZERO; - if !e.is_positive() { - extra_shift = (BIntD8::ONE - e).to_bits(); - e = BIntD8::ONE; - } - - let total_shift = - BIntD8::from(MB as i32 + 1 + b2 as i32 - b1 as i32) - BIntD8::from_bits(extra_shift); - - let large = if !total_shift.is_negative() { - (s1.as_::>()) << total_shift - } else { - (s1.as_::>()) >> (-total_shift) - }; - let mut division = (large / (s2.as_::>())).as_::>(); - - let rem = if division.bits() != Self::MB + 2 { - let rem = (large % (s2.as_::>())).as_::>(); - rem - } else { - e += BIntD8::ONE; - division = - ((large >> 1 as ExpType) / (s2.as_::>())).as_::>(); - //println!("div {} {}", large >> 1u8, s2); - let rem = - ((large >> 1 as ExpType) % (s2.as_::>())).as_::>(); - rem - }; - //println!("{}", rem); - if rem * BUintD8::TWO > s2 { - division += BUintD8::ONE; - } else if rem * BUintD8::TWO == s2 { - if (division & BUintD8::ONE) == BUintD8::ONE { - division += BUintD8::ONE; - } - } - if division.bits() == Self::MB + 2 { - e += BIntD8::ONE; - division >>= 1 as ExpType; - } - - if e > Self::MAX_EXP + Self::EXP_BIAS - BIntD8::ONE { - return Self::INFINITY; 
- } - - //println!("{:032b}", division); - - if e == BIntD8::ONE && division.bits() < Self::MB + 1 { - return Self::from_exp_mant(negative, BUintD8::ZERO, division); - } - - if division >> Self::MB != BUintD8::ZERO { - division ^= BUintD8::ONE << Self::MB; - } - Self::from_exp_mant(negative, e.to_bits(), division) - } -} - -impl Div for Float -where - [(); W * 2]:, -{ - type Output = Self; - - #[inline] - fn div(self, rhs: Self) -> Self { - let negative = self.is_sign_negative() ^ rhs.is_sign_negative(); - match (self.classify(), rhs.classify()) { - (FpCategory::Nan, _) | (_, FpCategory::Nan) => Self::NAN, - (FpCategory::Infinite, FpCategory::Infinite) => Self::NAN, - (FpCategory::Zero, FpCategory::Zero) => Self::NAN, - (FpCategory::Infinite, _) | (_, FpCategory::Zero) => { - if negative { - Self::NEG_INFINITY - } else { - Self::INFINITY - } - } - (FpCategory::Zero, _) | (_, FpCategory::Infinite) => { - if negative { - Self::NEG_ZERO - } else { - Self::ZERO - } - } - (_, _) => self.div_internal(rhs, negative), - } - } -} - -impl Rem for Float { - type Output = Self; - - #[inline] - fn rem(self, y: Self) -> Self { - handle_nan!(self; self); - handle_nan!(y; y); - - if y.is_zero() { - return Self::NAN; - } - if self.is_zero() { - return self; - } - if self.is_infinite() { - return Self::NAN; - } - if y.is_infinite() { - return self; - } - - let mut uxi = self.to_bits(); - let mut uyi = y.to_bits(); - let mut ex = self.exponent(); - let mut ey = y.exponent(); - let mut i; - - if uxi << 1 as ExpType <= uyi << 1 as ExpType { - if uxi << 1 as ExpType == uyi << 1 as ExpType { - return if self.is_sign_negative() { - Self::NEG_ZERO - } else { - Self::ZERO - }; - } - - return self; - } - - /* normalize x and y */ - if ex.is_zero() { - i = uxi << (Self::BITS - Self::MB); - while !BIntD8::from_bits(i).is_negative() { - ex -= BIntD8::ONE; - i <<= 1 as ExpType; - } - - uxi <<= -ex + BIntD8::ONE; - } else { - uxi &= BUintD8::MAX >> (Self::BITS - Self::MB); - uxi |= BUintD8::ONE << Self::MB; - } - //println!("{}", i); - - if ey.is_zero() { - i = uyi << (Self::BITS - Self::MB); - while !BIntD8::from_bits(i).is_negative() { - ey -= BIntD8::ONE; - i <<= 1 as ExpType; - } - - uyi <<= -ey + BIntD8::ONE; - } else { - uyi &= BUintD8::MAX >> (Self::BITS - Self::MB); - uyi |= BUintD8::ONE << Self::MB; - } - /* x mod y */ - while ex > ey { - i = uxi.wrapping_sub(uyi); - if !BIntD8::from_bits(i).is_negative() { - if i.is_zero() { - return if self.is_sign_negative() { - Self::NEG_ZERO - } else { - Self::ZERO - }; - } - uxi = i; - } - uxi <<= 1 as ExpType; - - ex -= BIntD8::ONE; - } - - i = uxi.wrapping_sub(uyi); - if !BIntD8::from_bits(i).is_negative() { - if i.is_zero() { - return if self.is_sign_negative() { - Self::NEG_ZERO - } else { - Self::ZERO - }; - } - uxi = i; - } - - while (uxi >> Self::MB).is_zero() { - uxi <<= 1 as ExpType; - ex -= BIntD8::ONE; - } - - /* scale result up */ - if ex.is_positive() { - uxi -= BUintD8::ONE << Self::MB; - uxi |= (ex.to_bits()) << Self::MB; - } else { - uxi >>= -ex + BIntD8::ONE; - } - - let f = Self::from_bits(uxi); - if self.is_sign_negative() { - -f - } else { - f - } - } -} - -impl Float { - #[inline] - pub const fn neg(mut self) -> Self { - self.bits.digits[W - 1] ^= 1 << (Digit::BITS - 1); - self - } -} - -crate::nightly::impl_const! { - impl const Neg for Float { - type Output = Self; - - #[inline] - fn neg(self) -> Self { - Self::neg(self) - } - } -} - -crate::nightly::impl_const! 
{ - impl const Neg for &Float { - type Output = Float; - - #[inline] - fn neg(self) -> Float { - (*self).neg() - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test::test_bignum; - use crate::test::types::{ftest, FTEST}; - - /*test_bignum! { - function: ::add(a: ftest, b: ftest) - }*/ - - test_bignum! { - function: ::sub(a: ftest, b: ftest) - } - - test_bignum! { - function: ::mul(a: ftest, b: ftest), - cases: [ - (5.6143642e23f64 as ftest, 35279.223f64 as ftest) - ] - } - - test_bignum! { - function: ::div(a: ftest, b: ftest) - } - - test_bignum! { - function: ::rem(a: ftest, b: ftest) - } - - test_bignum! { - function: ::neg(f: ftest) - } -} diff --git a/src/float/ops/add.rs b/src/float/ops/add.rs new file mode 100644 index 0000000..720d01b --- /dev/null +++ b/src/float/ops/add.rs @@ -0,0 +1,119 @@ +use core::num::FpCategory; +use crate::{float::UnsignedFloatExponent, BUintD8, ExpType}; +use super::Float; + +// TODO: quickcheck tests are very occasionally failing, need to fix this function +impl Float { + #[inline] + pub(super) fn add_internal(self, rhs: Self, negative: bool) -> Self { + let (a, b) = if rhs.abs().gt(&self.abs()) { + // If b has a larger exponent than a, swap a and b so that a has the larger exponent + (rhs, self) + } else { + (self, rhs) + }; + let (_, mut a_exp, mut a_mant) = a.into_biased_parts(); + let (_, b_exp, mut b_mant) = b.into_biased_parts(); + + let exp_diff = a_exp - b_exp; + + let sticky_bit = b_mant.trailing_zeros() as UnsignedFloatExponent + 1 < exp_diff; + + // Append extra bits to the mantissas to ensure correct rounding + a_mant <<= 2 as ExpType; + b_mant <<= 2 as ExpType; + + // If the shift causes an overflow, the b_mant is too small so is set to 0 + b_mant = if UnsignedFloatExponent::BITS - exp_diff.leading_zeros() <= ExpType::BITS { // number of bits needed to store exp_diff is less than bit width of ExpType, so can cast + b_mant.checked_shr(exp_diff as ExpType).unwrap_or(BUintD8::ZERO) + } else { + BUintD8::ZERO + }; + + if sticky_bit { + b_mant |= BUintD8::ONE; // round up + } + + let mut mant = a_mant + b_mant; + + let overflow = !(mant >> (MB + 3)).is_zero(); + if !overflow { + if mant & BUintD8::from_digit(0b11) == BUintD8::from_digit(0b11) + || mant & BUintD8::from_digit(0b110) == BUintD8::from_digit(0b110) + { + mant += BUintD8::FOUR; + if !(mant >> (MB + 3)).is_zero() { + mant >>= 1 as ExpType; + a_exp += 1; + } + } + } else { + match (mant & BUintD8::from_digit(0b111)).digits()[0] { + 0b111 | 0b110 | 0b101 => { + mant += BUintD8::EIGHT; + } + 0b100 => { + if mant & BUintD8::from_digit(0b1000) == BUintD8::from_digit(0b1000) { + mant += BUintD8::EIGHT; // 0b1000 + } + } + _ => {} + } + + mant >>= 1 as ExpType; + a_exp += 1; + } + if a_exp > Self::MAX_UNBIASED_EXP { + return if negative { + Self::NEG_INFINITY + } else { + Self::INFINITY + }; + } + + mant >>= 2 as ExpType; + + if (mant >> Self::MB).is_zero() { + a_exp = 0; + } else { + mant ^= BUintD8::ONE << Self::MB; + } + let a = Self::from_raw_parts(negative, a_exp, mant); + a + } + + #[inline] + pub(crate) fn add(self, rhs: Self) -> Self { + let self_negative = self.is_sign_negative(); + let rhs_negative = rhs.is_sign_negative(); + + match (self.classify(), rhs.classify()) { + (FpCategory::Nan, _) => self, + (_, FpCategory::Nan) => rhs, + (FpCategory::Infinite, FpCategory::Infinite) => { + if self_negative ^ rhs_negative { + Self::NAN + } else { + self + } + } + (FpCategory::Infinite, _) => self, + (_, FpCategory::Infinite) => rhs, + (FpCategory::Zero, FpCategory::Zero) 
=> { + if self_negative && rhs_negative { + Self::NEG_ZERO + } else { + Self::ZERO + } + } + (_, _) => { + if self_negative ^ rhs_negative { + self.sub_internal(rhs, self_negative) + } else { + let r = self.add_internal(rhs, self_negative); + r + } + } + } + } +} \ No newline at end of file diff --git a/src/float/ops/div.rs b/src/float/ops/div.rs new file mode 100644 index 0000000..e2a8470 --- /dev/null +++ b/src/float/ops/div.rs @@ -0,0 +1,295 @@ +use core::num::FpCategory; +use crate::float::FloatExponent; +use crate::float::UnsignedFloatExponent; +use crate::ExpType; +use crate::BUintD8; +use super::Float; +use crate::cast::As; + +impl Float { + #[inline] + pub(crate) fn div_internal(self, rhs: Self, negative: bool) -> Self + where + [(); W * 2]:, + { + let (a, b) = (self, rhs); + let (_, e1, s1) = a.into_biased_parts(); + let (_, e2, s2) = b.into_biased_parts(); + + let b1 = s1.bits(); + let b2 = s2.bits(); + + let mut e = + (e1 as FloatExponent) - (e2 as FloatExponent) + Self::EXP_BIAS + (b1 as FloatExponent) + - (b2 as FloatExponent) + - 1; + + let mut extra_shift = 0; + if !e.is_positive() { + extra_shift = (1 - e) as UnsignedFloatExponent; + e = 1; + } + + let total_shift = (MB as FloatExponent + 1 + b2 as FloatExponent - b1 as FloatExponent) - (extra_shift as FloatExponent); + + let large = if !total_shift.is_negative() { + (s1.as_::>()) << total_shift + } else { + (s1.as_::>()) >> (-total_shift) + }; + let mut division = (large / (s2.as_::>())).as_::>(); + + let rem = if division.bits() != Self::MB + 2 { + let rem = (large % (s2.as_::>())).as_::>(); + rem + } else { + e += 1; + division = + ((large >> 1 as ExpType) / (s2.as_::>())).as_::>(); + let rem = + ((large >> 1 as ExpType) % (s2.as_::>())).as_::>(); + rem + }; + if rem * BUintD8::TWO > s2 { + division += BUintD8::ONE; + } else if rem * BUintD8::TWO == s2 { + if (division & BUintD8::ONE) == BUintD8::ONE { + division += BUintD8::ONE; + } + } + if division.bits() == Self::MB + 2 { + e += 1; + division >>= 1 as ExpType; + } + + if e > Self::MAX_EXP + Self::EXP_BIAS - 1 { + return Self::INFINITY; + } + + if e == 1 && division.bits() < Self::MB + 1 { + return Self::from_raw_parts(negative, 0, division); + } + + if division >> Self::MB != BUintD8::ZERO { + division ^= BUintD8::ONE << Self::MB; + } + Self::from_raw_parts(negative, e as UnsignedFloatExponent, division) + } + + #[inline] + pub(super) fn div(self, rhs: Self) -> Self + where + [(); W * 2]:, + { + let negative = self.is_sign_negative() ^ rhs.is_sign_negative(); + match (self.classify(), rhs.classify()) { + (FpCategory::Nan, _) | (_, FpCategory::Nan) => Self::NAN, + (FpCategory::Infinite, FpCategory::Infinite) => Self::NAN, + (FpCategory::Zero, FpCategory::Zero) => Self::NAN, + (FpCategory::Infinite, _) | (_, FpCategory::Zero) => { + if negative { + Self::NEG_INFINITY + } else { + Self::INFINITY + } + } + (FpCategory::Zero, _) | (_, FpCategory::Infinite) => { + if negative { + Self::NEG_ZERO + } else { + Self::ZERO + } + } + (_, _) => self.div_internal(rhs, negative), + } + } +} + +/*/// Returns tuple of division and whether u is less than v +pub const fn div_float(u: BUintD8, v: BUintD8) -> (BUintD8, bool) { + let gt = if let core::cmp::Ordering::Less = u.cmp(&v) { + 0 + } else { + 1 + }; + // `self` is padded with N trailing zeros (less significant digits). + // `v` is padded with N leading zeros (more significant digits). + let shift = v.digits[N - 1].leading_zeros(); + // `shift` is between 0 and 64 inclusive. 
+ let v = super::unchecked_shl(v, shift); + // `v` is still padded with N leading zeros. + + struct Remainder { + first: Digit, + second: Digit, + rest: [Digit; M], + } + impl Remainder { + const fn new(uint: BUintD8, shift: ExpType) -> Self { + // This shift can be anything from 0 to 64 inclusive. + // Scenarios: + // * shift by 0 -> nothing happens, still N trailing zeros. + // * shift by 64 -> all digits shift by one to the right, there are now (N - 1) trailing zeros and 1 leading zero. + // * shift by amount between 0 and 64 -> there may be 0 or 1 leading zeros and (N - 1) or N trailing zeros. + // So indexing between 2N - 1 and N - 1 will get any non-zero digits. + // Instead of a logical right shift, we will perform a rotate right on the uint - this is the same except the part of the number which may have been removed from the right shift is instead brought to the most significant digit of the number. + // Then do fancy bit shifts and logic to separate the first and last non zero digits. + let shift = Digit::BITS - shift; + let mut rest = uint.rotate_right(shift); + let last_digit = rest.digits[M - 1]; + let last = (last_digit << shift) >> shift; + let second = last_digit ^ last; + rest.digits[M - 1] = last; + Self { + first: 0, + second, + rest: rest.digits, + } + } + const fn index(&self, index: usize) -> Digit { + if index == M - 1 { + self.first + } else if index == M { + self.second + } else if index > M { + self.rest[index - M - 1] + } else { + // There are M - 1 trailing zeros so we can return zero here. + 0 + } + } + const fn set_digit(&mut self, index: usize, digit: Digit) { + if index == M - 1 { + self.first = digit; + } else if index == M { + self.second = digit; + } else if index > M { + self.rest[index - M - 1] = digit; + } + } + /*const fn to_uint(self, shift: ExpType) -> BUintD8 { + let mut out = BUintD8::ZERO; + let mut i = 0; + while i < M { + out.digits[i] = self.index(i) >> shift; + i += 1; + } + if shift > 0 { + let mut i = 0; + while i < M { + out.digits[i] |= self.rest[i] << (Digit::BITS - shift); + i += 1; + } + } + out + }*/ + const fn sub(&mut self, start: usize, rhs: Mul, end: usize) -> bool { + let mut carry = false; + let mut i = 0; + while i < end { + let (sum, overflow1) = rhs.index(i).overflowing_add(carry as Digit); + let (sub, overflow2) = self.index(i + start).overflowing_sub(sum); + self.set_digit(i + start, sub); + carry = overflow1 || overflow2; + i += 1; + } + carry + } + const fn add(&mut self, start: usize, rhs: [Digit; M], end: usize) -> bool { + let mut carry = false; + let mut i = 0; + while i < end { + let (sum, overflow1) = rhs[i].overflowing_add(carry as Digit); + let (sum, overflow2) = self.index(i + start).overflowing_sub(sum); + self.set_digit(i + start, sum); + carry = overflow1 || overflow2; + i += 1; + } + carry + } + } + + // The whole implementation of `Mul` doesn't need to change as it is already padded with N leading zeros. 
+ struct Mul { + last: Digit, + rest: [Digit; M], + } + impl Mul { + const fn new(uint: BUintD8, rhs: Digit) -> Self { + let mut rest = [0; M]; + let mut carry: Digit = 0; + let mut i = 0; + while i < M { + let (prod, c) = crate::arithmetic::mul_carry_unsigned(carry, 0, uint.digits[i], rhs); + carry = c; + rest[i] = prod; + i += 1; + } + Self { + last: carry, + rest, + } + } + const fn index(&self, index: usize) -> Digit { + if index == M { + self.last + } else { + self.rest[index] + } + } + } + + let mut u = Remainder::new(u, shift); + let mut q = BUintD8::ZERO; + let v_n_1 = v.digits[N - 1]; + let v_n_2 = v.digits[N - 2]; + let gt_half = v_n_1 > digit::HALF; + + let mut j = N + gt; + while j > gt { + j -= 1; + let u_jn = u.index(j + N); + let mut q_hat = if u_jn < v_n_1 { + let (mut q_hat, mut r_hat) = if gt_half { + BUintD8::::div_wide(u_jn, u.index(j + N - 1), v_n_1) + } else { + BUintD8::::div_half(u_jn, u.index(j + N - 1), v_n_1) + }; + loop { + let a = ((r_hat as DoubleDigit) << digit::BITS) | u.index(j + N - 2) as DoubleDigit; + let b = q_hat as DoubleDigit * v_n_2 as DoubleDigit; + if b <= a { + break; + } + /*let (hi, lo) = digit::from_double_digit(q_hat as DoubleDigit * v_n_2 as DoubleDigit); + if hi < r_hat { + break; + } else if hi == r_hat && lo <= u.index(j + n - 2) { + break; + }*/ + q_hat -= 1; + let (new_r_hat, overflow) = r_hat.overflowing_add(v_n_1); + r_hat = new_r_hat; + if overflow { + break; + } + } + q_hat + } else { + Digit::MAX + }; + + let q_hat_v = Mul::new(v, q_hat); + let carry = u.sub(j, q_hat_v, N + 1); + if carry { + q_hat -= 1; + let carry = u.add(j, v.digits, N); + u.set_digit(j + N, u.index(j + N).wrapping_add(carry as Digit)); + } + // if self is less than other, q_hat is 0 + q.digits[j - gt] = q_hat; + } + + (q, gt == 0) + //super::unchecked_shl(self.as_buint::<{N * 2}>(), N as u16 * 64).div_rem(v.as_buint::<{N * 2}>()).0 +}*/ \ No newline at end of file diff --git a/src/float/ops/mod.rs b/src/float/ops/mod.rs new file mode 100644 index 0000000..73a88cd --- /dev/null +++ b/src/float/ops/mod.rs @@ -0,0 +1,150 @@ +use super::Float; +use core::iter::{Iterator, Product, Sum}; +use core::ops::{Add, Div, Mul, Neg, Rem, Sub}; + +mod add; +mod sub; +mod mul; +mod div; +mod rem; + +impl Add for Float { + type Output = Self; + + #[inline] + fn add(self, rhs: Self) -> Self { + Self::add(self, rhs) + } +} + +impl Sub for Float { + type Output = Self; + + #[inline] + fn sub(self, rhs: Self) -> Self { + Self::sub(self, rhs) + } +} + +crate::int::ops::op_ref_impl!(Add> for Float, add); + +impl Sum for Float { + #[inline] + fn sum>(iter: I) -> Self { + iter.fold(Self::ZERO, |a, b| a + b) + } +} + +impl<'a, const W: usize, const MB: usize> Sum<&'a Self> for Float { + #[inline] + fn sum>(iter: I) -> Self { + iter.fold(Self::ONE, |a, b| a + *b) + } +} + +crate::int::ops::op_ref_impl!(Sub> for Float, sub); + +impl Mul for Float { + type Output = Self; + + #[inline] + fn mul(self, rhs: Self) -> Self { + Self::mul(self, rhs) + } +} + +impl Product for Float +where + [(); W * 2]:, +{ + #[inline] + fn product>(iter: I) -> Self { + iter.fold(Self::ONE, |a, b| a * b) + } +} + +impl<'a, const W: usize, const MB: usize> Product<&'a Self> for Float +where + [(); W * 2]:, +{ + #[inline] + fn product>(iter: I) -> Self { + iter.fold(Self::ONE, |a, b| a * *b) + } +} + +impl Div for Float +where + [(); W * 2]:, +{ + type Output = Self; + + #[inline] + fn div(self, rhs: Self) -> Self { + Self::div(self, rhs) + } +} + +// crate::int::ops::op_ref_impl!(Div> for Float, div); + +impl Rem 
for Float { + type Output = Self; + + #[inline] + fn rem(self, rhs: Self) -> Self { + Self::rem(self, rhs) + } +} + +crate::int::ops::op_ref_impl!(Rem> for Float, rem); + +crate::nightly::impl_const! { + impl const Neg for Float { + type Output = Self; + + #[inline] + fn neg(self) -> Self { + Self::neg(self) + } + } +} + +crate::nightly::impl_const! { + impl const Neg for &Float { + type Output = Float; + + #[inline] + fn neg(self) -> Float { + (*self).neg() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::test_bignum; + use crate::test::types::{ftest, FTEST}; + + test_bignum! { + function: ::add(a: ftest, b: ftest) + } + test_bignum! { + function: ::sub(a: ftest, b: ftest) + } + test_bignum! { + function: ::mul(a: ftest, b: ftest), + cases: [ + (5.6143642e23f64 as ftest, 35279.223f64 as ftest) + ] + } + test_bignum! { + function: ::div(a: ftest, b: ftest) + } + test_bignum! { + function: ::rem(a: ftest, b: ftest) + } + test_bignum! { + function: ::neg(f: ftest) + } +} diff --git a/src/float/ops/mul.rs b/src/float/ops/mul.rs new file mode 100644 index 0000000..bec66af --- /dev/null +++ b/src/float/ops/mul.rs @@ -0,0 +1,149 @@ +use super::Float; +use crate::float::FloatExponent; +use crate::float::UnsignedFloatExponent; +use crate::BIntD8; +use crate::BUintD8; +use crate::ExpType; +use core::num::FpCategory; + +impl Float { + #[inline] + pub(crate) fn mul_internal(self, rhs: Self, negative: bool) -> Self { + let (a, b) = (self, rhs); + let (_, exp_a, mant_a) = a.into_biased_parts(); + let (_, exp_b, mant_b) = b.into_biased_parts(); + + // TODO: make so as_ can infer type so can switch trait definition if needed + let mut mant_prod = mant_a.widening_mul(mant_b); + + let prod_bits = if mant_prod.1.bits() == 0 { + mant_prod.0.bits() + } else { + mant_prod.1.bits() + Self::BITS + }; + + if prod_bits == 0 { + return if negative { Self::NEG_ZERO } else { Self::ZERO }; + } + + let extra_bits = if prod_bits > (Self::MB + 1) { + prod_bits - (Self::MB + 1) + } else { + 0 + }; + + let mut exp = + (exp_a as FloatExponent) - Self::EXP_BIAS + (exp_b as FloatExponent) + (extra_bits as FloatExponent) + - (MB as FloatExponent); + + if exp > Self::MAX_EXP + Self::EXP_BIAS - 1 { + return if negative { + Self::NEG_INFINITY + } else { + Self::INFINITY + }; + } + + let mut extra_shift = 0; + if !exp.is_positive() { + extra_shift = (1 - exp) as UnsignedFloatExponent; + exp = 1; + } + let total_shift = (extra_bits as UnsignedFloatExponent) + extra_shift; + + let mp0tz = mant_prod.0.trailing_zeros(); + let tz = if mp0tz == Self::BITS { + mant_prod.1.trailing_zeros() + Self::BITS + } else { + mp0tz + }; + + let sticky_bit = (tz as UnsignedFloatExponent + 1) < total_shift; + let mut mant = match ExpType::try_from(total_shift - 1) { + Ok(sub) => { + if sub > Self::BITS * 2 { + (BUintD8::ZERO, BUintD8::ZERO) + } else if sub >= Self::BITS { + (mant_prod.1 >> (sub - Self::BITS), BUintD8::ZERO) + } else { + let mask = BUintD8::MAX >> (Self::BITS - sub); + let carry = mant_prod.1 & mask; + mant_prod.1 >>= sub; + mant_prod.0 = (mant_prod.0 >> sub) | (carry << (Self::BITS - sub)); + mant_prod + } + } + Err(_) => (BUintD8::ZERO, BUintD8::ZERO), + }; + if mant.0.bit(0) { + if sticky_bit || mant.0.bit(1) { + // Round up + let (sum, carry) = mant.0.overflowing_add(BUintD8::ONE); + mant.0 = sum; + if carry { + mant.1 += BUintD8::ONE; + } + } + } + { + let carry = mant.1.bit(0); + //mant.1 >>= 1 as ExpType; + mant.0 >>= 1 as ExpType; + if carry { + mant.0 |= BIntD8::MIN.to_bits(); + } + } + + let mut m1b 
= mant.1.bits(); + if m1b != 0 { + m1b -= 1; + } + /*let bits = if m1b == 0 { + mant.0.bits() + } else { + m1b + Self::BITS + };*/ + let m0b = mant.0.bits(); + if m0b > Self::MB + 1 { + // it's possible that the mantissa has too many bits, so shift it right and increase the exponent until it has the correct number of bits + let inc = m0b - (Self::MB + 1); + mant.0 = mant.0 >> inc; + exp += inc as FloatExponent; + } + + if exp > Self::MAX_EXP + Self::EXP_BIAS - 1 { + return if negative { + Self::NEG_INFINITY + } else { + Self::INFINITY + }; + } + + if exp == 1 && m1b < Self::MB + 1 { + return Self::from_raw_parts(negative, 0, mant.0); + } + //if mant >> Self::MB != BUintD8::ZERO { + mant.0 ^= BUintD8::ONE << Self::MB; + //} + Self::from_raw_parts(negative, exp as UnsignedFloatExponent, mant.0) + } + + #[inline] + pub(super) fn mul(self, rhs: Self) -> Self { + let negative = self.is_sign_negative() ^ rhs.is_sign_negative(); + match (self.classify(), rhs.classify()) { + (FpCategory::Nan, _) | (_, FpCategory::Nan) => return Self::NAN, + (FpCategory::Infinite, FpCategory::Zero) | (FpCategory::Zero, FpCategory::Infinite) => { + Self::NAN + } + (FpCategory::Infinite, _) | (_, FpCategory::Infinite) => { + if negative { + Self::NEG_INFINITY + } else { + Self::INFINITY + } + } + (_, _) => self.mul_internal(rhs, negative), + } + } +} diff --git a/src/float/ops/rem.rs b/src/float/ops/rem.rs new file mode 100644 index 0000000..877dfc8 --- /dev/null +++ b/src/float/ops/rem.rs @@ -0,0 +1,120 @@ +use crate::float::UnsignedFloatExponent; +use crate::BUintD8; +use crate::BIntD8; +use crate::ExpType; +use super::Float; + +impl Float { + #[inline] + pub(super) fn rem(self, y: Self) -> Self { + handle_nan!(self; self); + handle_nan!(y; y); + + if y.is_zero() { + return Self::NAN; + } + if self.is_zero() { + return self; + } + if self.is_infinite() { + return Self::NAN; + } + if y.is_infinite() { + return self; + } + + let mut uxi = self.to_bits(); + let mut uyi = y.to_bits(); + let mut ex = self.signed_biased_exponent(); + let mut ey = y.signed_biased_exponent(); + let mut i; + + if uxi << 1 as ExpType <= uyi << 1 as ExpType { + if uxi << 1 as ExpType == uyi << 1 as ExpType { + return if self.is_sign_negative() { + Self::NEG_ZERO + } else { + Self::ZERO + }; + } + + return self; + } + + /* normalize x and y */ + if ex == 0 { + i = uxi << (Self::BITS - Self::MB); + while !BIntD8::from_bits(i).is_negative() { + ex -= 1; + i <<= 1 as ExpType; + } + + uxi <<= -ex + 1; + } else { + uxi &= BUintD8::MAX >> (Self::BITS - Self::MB); + uxi |= BUintD8::ONE << Self::MB; + } + + if ey == 0 { + i = uyi << (Self::BITS - Self::MB); + while !BIntD8::from_bits(i).is_negative() { + ey -= 1; + i <<= 1 as ExpType; + } + + uyi <<= -ey + 1; + } else { + uyi &= BUintD8::MAX >> (Self::BITS - Self::MB); + uyi |= BUintD8::ONE << Self::MB; + } + /* x mod y */ + while ex > ey { + i = uxi.wrapping_sub(uyi); + if !BIntD8::from_bits(i).is_negative() { + if i.is_zero() { + return if self.is_sign_negative() { + Self::NEG_ZERO + } else { + Self::ZERO + }; + } + uxi = i; + } + uxi <<= 1 as ExpType; + + ex -= 1; + } + + i = uxi.wrapping_sub(uyi); + if !BIntD8::from_bits(i).is_negative() { + if i.is_zero() { + return if self.is_sign_negative() { + Self::NEG_ZERO + } else { + Self::ZERO + }; + } + uxi = i; + } + + while (uxi >> Self::MB).is_zero() { + uxi <<= 1 as ExpType; + ex -= 1; + } + + /* scale result up */ + if ex.is_positive() { + uxi -= BUintD8::ONE << Self::MB; + uxi |= BUintD8::cast_from_unsigned_float_exponent(ex as 
UnsignedFloatExponent) << Self::MB; + } else { + uxi >>= -ex + 1; + } + + let f = Self::from_bits(uxi); + if self.is_sign_negative() { + -f + } else { + f + } + } +} diff --git a/src/float/ops/sub.rs b/src/float/ops/sub.rs new file mode 100644 index 0000000..7d4650f --- /dev/null +++ b/src/float/ops/sub.rs @@ -0,0 +1,140 @@ +use core::num::FpCategory; +use crate::cast::As; +use crate::float::FloatExponent; +use crate::float::UnsignedFloatExponent; +use crate::BUintD8; +use crate::ExpType; +use super::Float; + +// TODO: this very occasionally fails quickcheck tests, need to fix +impl Float { + #[inline] + pub(crate) fn sub_internal(mut self, mut rhs: Self, mut negative: bool) -> Self { + if rhs.abs() > self.abs() { + // If b has a larger exponent than a, swap a and b so that a has the larger exponent + negative = !negative; + core::mem::swap(&mut self, &mut rhs); + } + if self.abs() == rhs.abs() { + return Self::ZERO; + } + + let (_, a_exp, mut a_mant) = self.into_biased_parts(); + let (_, b_exp, mut b_mant) = rhs.into_biased_parts(); + let exp_diff = a_exp - b_exp; + + let mut a_exp = a_exp as FloatExponent; + + let sticky_bit2 = exp_diff != 0 + && exp_diff < BUintD8::::BITS.into() + && b_mant.bit(exp_diff.as_::() - 1); + let all_zeros = + exp_diff != 0 && b_mant.trailing_zeros() + 1 == exp_diff.as_::(); + + // Append extra bits to the mantissas to ensure correct rounding + a_mant = a_mant << 1 as ExpType; + b_mant = b_mant << 1 as ExpType; + + let sticky_bit = b_mant.trailing_zeros() < exp_diff.as_(); + + // If the shift causes an overflow, the b_mant is too small so is set to 0 + let shifted_b_mant = match exp_diff.try_into().ok() { + Some(exp_diff) => b_mant.checked_shr(exp_diff).unwrap_or(BUintD8::ZERO), + None => BUintD8::ZERO, + }; + + // If the shift causes an overflow, the b_mant is too small so is set to 0 + + if sticky_bit { + //b_mant |= 1; + } + + let mut mant = a_mant - shifted_b_mant; + + if mant.bits() == Self::MB + 2 { + if mant & BUintD8::from(0b10u8) == BUintD8::from(0b10u8) && !sticky_bit { + mant += BUintD8::ONE; + } + + mant >>= 1 as ExpType; + } else { + a_exp -= 1; + a_mant <<= 1 as ExpType; + b_mant <<= 1 as ExpType; + + let sticky_bit = b_mant.trailing_zeros() < exp_diff.as_(); + + // If the shift causes an overflow, the b_mant is too small so is set to 0 + let shifted_b_mant = match exp_diff.try_into().ok() { + Some(exp_diff) => b_mant.checked_shr(exp_diff).unwrap_or(BUintD8::ZERO), + None => BUintD8::ZERO, + }; + + // If the shift causes an overflow, the b_mant is too small so is set to 0 + + if sticky_bit { + //b_mant |= 1; + } + + mant = a_mant - shifted_b_mant; + + if mant.bits() == Self::MB + 2 { + if mant & BUintD8::from(0b10u8) == BUintD8::from(0b10u8) && !sticky_bit { + mant += BUintD8::ONE; + } + + mant >>= 1 as ExpType; + } else { + let _half_way = (); // TODO + if sticky_bit2 && !all_zeros + || (sticky_bit2 + && all_zeros + && b_mant & BUintD8::from(0b1u8) == BUintD8::from(0b1u8)) + { + mant -= BUintD8::ONE; + } + let bits = mant.bits(); + mant <<= Self::MB + 1 - bits; + a_exp -= MB as FloatExponent + 2 - bits as FloatExponent; + if !a_exp.is_positive() { + a_exp = 1; + mant >>= 1 - a_exp; + } + } + } + + if (mant >> Self::MB).is_zero() { + a_exp = 0; + } else { + mant ^= BUintD8::ONE << Self::MB; + } + + Self::from_raw_parts(negative, a_exp as UnsignedFloatExponent, mant) + } + + #[inline] + pub(super) fn sub(self, rhs: Self) -> Self { + match (self.classify(), rhs.classify()) { + (FpCategory::Nan, _) => self, + (_, FpCategory::Nan) => rhs, + 
(FpCategory::Infinite, FpCategory::Infinite) => { + match (self.is_sign_negative(), rhs.is_sign_negative()) { + (true, false) => Self::NEG_INFINITY, + (false, true) => Self::INFINITY, + _ => Self::NAN, + } + } + (FpCategory::Infinite, _) => self, + (_, FpCategory::Infinite) => rhs.neg(), + (_, _) => { + let self_negative = self.is_sign_negative(); + let rhs_negative = rhs.is_sign_negative(); + if self_negative ^ rhs_negative { + self.add_internal(rhs, self_negative) + } else { + self.sub_internal(rhs, self_negative) + } + } + } + } +} diff --git a/src/float/parse.rs b/src/float/parse.rs new file mode 100644 index 0000000..b67977e --- /dev/null +++ b/src/float/parse.rs @@ -0,0 +1,190 @@ +use crate::BUintD8; +use crate::ExpType; +use crate::cast::{As, CastFrom}; +use super::Float; + +impl Float { + pub fn parse(digits: &[u8], exp: i32) -> Self where [u8; W * 2]: { + let one = Self::ONE; + let two = Self::TWO; + let three: Self = 3u8.as_(); + let four: Self = 4u8.as_(); + let five: Self = 5u8.as_(); + let six: Self = 6u8.as_(); + let seven: Self = 7u8.as_(); + let eight: Self = 8u8.as_(); + let nine: Self = 9u8.as_(); + let ten: Self = 10u8.as_(); + + let mut out = Self::ZERO; + let mut pow = Self::ONE; + for d in digits { + let a = Self::cast_from(*d) * pow; + + out = out + a; + pow = pow / ten; + } + out.powi(exp) + } +} + +#[test] +fn test_parse() { + use core::str::FromStr; + use alloc::string::String; + + let digits = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8]; + let parsed = super::F32::parse(&digits, 0); + let mut s = unsafe { String::from_utf8_unchecked(digits.into_iter().map(|d| d + 48).collect()) }; + s.insert(1, '.'); + s.push_str(""); + // println!("{}", s); + // println!("{:032b}", parsed.to_bits()); + // println!("{:032b}", f32::from_str(&s).unwrap().to_bits()); + // println!("F: {}", parsed.to_f32()); + // println!("f: {}", f32::from_str(&s).unwrap()); + // println!("n: {}", ::from_str_radix(&s, 10).unwrap()); +} + +// TODO: create round-to-nearest ties-to-even function, it could take a uint and a target bit width, and return the correctly rounded result in the target precision, as well as the overflow, and whether a round up occurred +// #[allow(unused)] +// fn f64_as_f32(f: f64) -> f32 { +// if f.is_infinite() { +// return if f.is_sign_negative() { +// f32::NEG_INFINITY +// } else { +// f32::INFINITY +// }; +// } +// if f == 0.0 && f.is_sign_positive() { +// return 0.0; +// } +// if f == 0.0 && f.is_sign_negative() { +// return -0.0; +// } +// let bits = f.to_bits(); +// let mut mant = bits & 0xfffffffffffff; +// let mut exp = ((bits & (i64::MAX as u64)) >> 52) as i32; +// if exp != 0 { +// mant |= 0x10000000000000; + +// } else { +// exp = 1; +// } +// exp -= 1023; +// //println!("exp: {}", exp); +// let mut mantissa_shift = 52 - 23; +// /*if mant.leading_zeros() != 64 - (52 + 1) { +// exp +// }*/ +// if exp >= f32::MAX_EXP { +// return if f.is_sign_negative() { +// f32::NEG_INFINITY +// } else { +// f32::INFINITY +// }; +// } +// if exp < f32::MIN_EXP - 1 { +// let diff = (f32::MIN_EXP - 1) - exp; +// mantissa_shift += diff; +// exp = -(f32::MAX_EXP - 1); +// } +// let new_mant = mant.checked_shr(mantissa_shift as u32).unwrap_or(0); +// //println!("{:025b}", new_mant); + +// let shifted_back = new_mant.checked_shl(mantissa_shift as u32).unwrap_or(0); +// let overflow = mant ^ shifted_back; +// /*println!("overflow: {:029b}", overflow); +// println!("mant: {:053b}", mant); +// println!("shbk: {:053b}", shifted_back); +// println!("lz: {}", overflow.leading_zeros());*/ +// if 
overflow.leading_zeros() as i32 == 64 - mantissa_shift { // this means there is a one at the overflow bit +// if overflow.count_ones() == 1 { // this means the overflowing is 100...00 so apply ties-to-even rounding +// if new_mant & 1 == 1 { // if the last bit is 1, then we round up +// mant = new_mant + 1; +// //println!("updated mant: {:025b}", mant); +// } else { // otherwise we round down +// mant = new_mant; +// } +// } else { +// mant = new_mant + 1; // round up +// } +// } else { +// mant = new_mant; +// } +// //1111111111111111111111111 +// //111111111111111111111111 +// if mant.leading_zeros() < 64 - (23 + 1) { +// // println!("mant overflow"); +// mant >>= 1; +// exp += 1; +// } +// if exp > f32::MAX_EXP { +// return if f.is_sign_negative() { +// f32::NEG_INFINITY +// } else { +// f32::INFINITY +// }; +// } +// mant ^= 0x800000; +// let sign = (bits >> 63) as u32; +// let exp = (exp + (f32::MAX_EXP - 1)) as u32; +// let mant = mant as u32; +// let bits = (sign << 31) | (exp << 23) | mant; +// f32::from_bits(bits) +// } + +// #[cfg(test)] +// quickcheck::quickcheck! { +// fn qc_test_f64_as_f32(f: f64) -> quickcheck::TestResult { +// if !f.is_finite() { +// return quickcheck::TestResult::discard(); +// } +// let f2 = f64_as_f32(f); +// let f3 = f as f32; +// quickcheck::TestResult::from_bool(f2 == f3) +// } +// } + +// type U32 = BUintD32::<1>; +// fn parse(s: &str) -> (types::U128, U32) { +// let mut radix = 10; +// let mut custom_radix = false; +// let mut src = s; +// let bytes = s.as_bytes(); +// let len = bytes.len(); +// let mut first_char_zero = false; +// let mut bit_width = U32::power_of_two(7); +// let mut i = 0; +// while i < len { +// let byte = bytes[i]; +// if i == 0 && byte == b'0' { +// first_char_zero = true; +// } else if i == 1 && first_char_zero && (byte == b'b' || byte == b'o' || byte == b'x') { +// let ptr = unsafe { src.as_ptr().add(2) }; +// let new = core::ptr::slice_from_raw_parts(ptr, src.len() - 2); +// src = unsafe { &*(new as *const str) }; +// radix = match byte { +// b'b' => 2, +// b'o' => 8, +// b'x' => 16, +// _ => unreachable!(), +// }; +// custom_radix = true; +// } +// if i != 0 && i != len - 1 && byte == b'u' { +// let old_len = src.len(); +// let ptr = src.as_ptr(); + +// let new_len = if custom_radix { i - 2 } else { i }; +// let bit_width_ptr = core::ptr::slice_from_raw_parts(unsafe { ptr.add(new_len + 1) }, old_len - new_len - 1); +// let new = core::ptr::slice_from_raw_parts(ptr, new_len); +// src = unsafe { &*(new as *const str) }; +// let bit_width_str = unsafe { &*(bit_width_ptr as *const str) }; +// bit_width = U32::parse_str_radix(bit_width_str, 10); +// break; +// } +// i += 1; +// } +// (types::U128::parse_str_radix(src, radix), bit_width) +// } \ No newline at end of file diff --git a/src/float/random.rs b/src/float/random.rs new file mode 100644 index 0000000..d355124 --- /dev/null +++ b/src/float/random.rs @@ -0,0 +1,86 @@ +use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler}; +use rand::distributions::{Distribution, Open01, OpenClosed01, Standard}; +use rand::{Error, Fill, Rng, SeedableRng}; + +use super::{Float, FloatExponent}; +use crate::BUintD8; + +fn seeded_rngs(seed: u64) -> (R, R) { + let rng = R::seed_from_u64(seed); + let rng2 = rng.clone(); // need to clone `rng` to produce the same results before it is used as `gen` mutates it + (rng, rng2) +} + +impl Distribution> for Standard { + fn sample(&self, rng: &mut R) -> Float { + let random_bits: BUintD8 = rng.gen(); + let mantissa = 
random_bits.shr(Float::::BITS - Float::::MB - 1); + if mantissa.is_zero() { + return Float::ZERO; + } + if mantissa.is_one() { + return Float::HALF_EPSILON; + } + let mantissa_bits = mantissa.bits(); + let abs_exponent = Float::::MB + 2 - mantissa_bits; // has to be in this order to prevent overflow + Float::from_signed_parts(false, -(abs_exponent as FloatExponent), mantissa.shl(abs_exponent - 1)) + } +} + +impl Distribution> for OpenClosed01 { + fn sample(&self, rng: &mut R) -> Float { + let random_bits: BUintD8 = rng.gen(); + let mantissa = random_bits.shr(Float::::BITS - Float::::MB - 1); + let mantissa = mantissa.add(BUintD8::ONE); // add one so mantissa is never zero + if mantissa.is_zero() { + return Float::HALF_EPSILON; + } + let mantissa_bits = mantissa.bits(); + let abs_exponent = Float::::MB + 2 - mantissa_bits; // has to be in this order to prevent overflow + Float::from_signed_parts(false, -(abs_exponent as FloatExponent), mantissa.shl(abs_exponent - 1)) + } +} + +impl Distribution> for Open01 { + fn sample(&self, rng: &mut R) -> Float { + let random_bits: BUintD8 = rng.gen(); + let mantissa = random_bits.shr(Float::::BITS - Float::::MB); + if mantissa.is_zero() { + return Float::HALF_EPSILON; + } + let mantissa_bits = mantissa.bits(); + let abs_exponent = Float::::MB + 1 - mantissa_bits; // has to be in this order to prevent overflow + let exponent = -(abs_exponent as FloatExponent); + let mantissa = mantissa.shl(1).bitor(BUintD8::ONE); // = 2*mantissa + 1 + let mantissa = mantissa.shl(Float::::MB - mantissa_bits); + let a = Float::from_signed_parts(false, exponent, mantissa); + a + } +} + +#[cfg(test)] +mod tests { + use rand::distributions::OpenClosed01; + use rand::rngs::StdRng; + use rand::{distributions::Open01, rngs::SmallRng}; + use rand::{Rng, SeedableRng}; + use crate::{float::F32, test::convert}; + + use super::seeded_rngs; + + #[test] + fn test_random() { + let mut r = StdRng::from_entropy(); + let seed = r.gen(); + let (mut r1, mut r2) = seeded_rngs::(seed); + let big: F32 = r1.gen(); + let prim: f32 = r2.gen(); + + assert!(convert::test_eq(big, prim)); + + let big: F32 = r1.sample(OpenClosed01); + let prim: f32 = r2.sample(OpenClosed01); + + assert!(convert::test_eq(big, prim)); + } +} \ No newline at end of file diff --git a/src/float/rounding copy.rs b/src/float/rounding copy.rs new file mode 100644 index 0000000..224be77 --- /dev/null +++ b/src/float/rounding copy.rs @@ -0,0 +1,334 @@ +use crate::{BIntD8, BUintD8}; +use crate::cast::As; +use crate::doc; +use crate::ExpType; +use super::Float; + +type Digit = u8; + +impl BUintD8 { + #[inline] + const fn to_exp_type(&self) -> Option { + let mut out = 0; + let mut i = 0; + if Digit::BITS > ExpType::BITS { + let small = self.digits[i] as ExpType; + let trunc = small as Digit; + if self.digits[i] != trunc { + return None; + } + out = small; + i = 1; + } else { + loop { + let shift = i << crate::digit::u8::BIT_SHIFT; // TODO: make sure to generalise when using general digits + if i >= W || shift >= ExpType::BITS as usize { + break; + } + out |= (self.digits[i] as ExpType) << shift; + i += 1; + } + } + + while i < W { + if self.digits[i] != 0 { + return None; + } + i += 1; + } + + Some(out) + } + + #[inline] + const fn from_exp_type(int: ExpType) -> Option { + let mut out = Self::ZERO; + let mut i = 0; + while i << crate::digit::u8::BIT_SHIFT < ExpType::BITS as usize { // TODO: make sure to generalise when using general digits + let d = (int >> (i << crate::digit::u8::BIT_SHIFT)) as Digit; // TODO: make sure to 
generalise when using general digits + if d != 0 { + if i < W { + out.digits[i] = d; + } else { + return None; + } + } + i += 1; + } + Some(out) + } +} + +#[derive(PartialEq, Eq)] +enum FractType { + Zero, + AbsEqHalf, + AbsGtHalf, + Other +} + +#[doc = doc::rounding::impl_desc!()] +impl Float { + #[doc = doc::rounding::round!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn round(self) -> Self { + let (fract, trunc) = self.fract_trunc(); + if fract.abs() < Self::HALF { + trunc + } else if trunc.is_sign_negative() { + trunc - Self::ONE + } else { + trunc + Self::ONE + } + } + + #[inline] + pub fn round2(self) -> Self { + let a = Self::HALF - Self::QUARTER * Self::EPSILON; + if self.is_sign_positive() { + (self + a).trunc() + } else { + (self - a).trunc() + } + } + + #[doc = doc::rounding::ceil!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn ceil(self) -> Self { + let (fract, trunc) = self.fract_trunc(); + if self.is_sign_negative() || fract.is_zero() { + trunc + } else { + trunc + Self::ONE + } + } + + #[inline] + pub fn ceil2(self) -> Self { + let mut u = self.to_bits(); + let e = self.signed_biased_exponent() - Self::EXP_BIAS; + + if e >= MB as FloatExponent { + return self; + } + if !e.is_negative() { + let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; + if (u & m).is_zero() { + return self; + } + if self.is_sign_positive() { + u += m; + } + u &= !m; + } else if self.is_sign_negative() { + return Self::NEG_ZERO; + } else if !(u << 1u8).is_zero() { + return Self::ONE; + } + Self::from_bits(u) + } + + #[doc = doc::rounding::floor!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn floor3(self) -> Self { + if !self.is_finite() { + return self; + } + let (_, trunc) = self.fract_trunc(); + return trunc; + // match fract_type { + // true => return trunc, + // _ => {}, + // } + if self.is_sign_positive() { + trunc + } else { + trunc - Self::ONE + } + // let (fract, trunc) = self.fract_trunc(); + // if self.is_sign_positive() || fract.is_zero() { + // trunc + // } else { + // trunc - Self::ONE + // } + } + + #[inline] + pub fn floor(self) -> Self { + if !self.is_finite() { + return self; + } + if self.is_zero() { + return self; + } + + let (sign, exponent, mantissa) = self.into_signed_parts(); + + use core::cmp::Ordering; + if exponent.is_negative() { // exponent is negative, so absolute value must be < 1, so truncate to 0 + return if sign { // self is equal to its fractional part, and isn't zero + Self::NEG_ONE + } else { + Self::ZERO + }; + } + // exponent is >= 0, so can take unsigned_abs without changing its value + + debug_assert!(!self.is_subnormal()); // exponent >= 0 so number should be normal, so mantissa has implicit leading one + + if let Some(small_exponent) = exponent.unsigned_abs().to_exp_type() { + if small_exponent >= Self::MB { // if the exponent exceeds the number of mantissa bits, then the number is an integer so truncation does nothing and fractional part is zero + self + } else { + let mask = BUintD8::::MAX.shl(Self::MB - small_exponent); + let trunc_mantissa = mantissa.bitand(mask); // set the last MB - exponent bits of the mantissa to zero - this is the fractional part + + let trunc = Self::from_signed_parts(sign, exponent, trunc_mantissa); + if trunc_mantissa.eq(&mantissa) || !sign { + trunc + } else { + trunc - Self::ONE + } + } + } else { + self + } + } + + #[inline] + pub fn floor2(self) -> Self { + let mut bits = self.to_bits(); + let e = self.signed_biased_exponent() - Self::EXP_BIAS; + + if e >= MB as 
FloatExponent { + return self; + } + if !e.is_negative() { + let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; + if (bits & m).is_zero() { + return self; + } + if self.is_sign_negative() { + bits += m; + } + bits &= !m; + } else if self.is_sign_positive() { + return Self::ZERO; + } else if !(bits << 1u8).is_zero() { + return Self::NEG_ONE; + } + Self::from_bits(bits) + } + + #[inline] + pub const fn fract_trunc(self) -> (Self, Self) { + handle_nan!((self, self); self); + if self.is_infinite() { + return (Self::NAN, self); + } + let (f, _, t) = self.fract_fract_type_trunc(); + (f, t) + } + + /// compute fract, fract type and trunc. useful when only fract type is needed, not full fract, which is slower to compute + #[inline] + const fn fract_fract_type_trunc(self) -> (Self, bool, Self) { + debug_assert!(self.is_finite()); + if self.is_zero() { + return (Self::ZERO, true, self); + } + + let (sign, exponent, mantissa) = self.into_signed_parts(); + + use core::cmp::Ordering; + if exponent.is_negative() { // exponent is negative, so absolute value must be < 1, so truncate to 0 + // let fract_type = ; + return if sign { + (self, false, Self::NEG_ZERO) + } else { + (self, false, Self::ZERO) + }; + } + // exponent is >= 0, so can take unsigned_abs without changing its value + + debug_assert!(!self.is_subnormal()); // exponent >= 0 so number should be normal, so mantissa has implicit leading one + + if let Some(small_exponent) = exponent.unsigned_abs().to_exp_type() { + if small_exponent >= Self::MB { // if the exponent exceeds the number of mantissa bits, then the number is an integer so truncation does nothing and fractional part is zero + (Self::ZERO, true, self) + } else { + let mask = BUintD8::::MAX.shl(Self::MB - small_exponent); + let trunc_mantissa = mantissa.bitand(mask); // set the last MB - exponent bits of the mantissa to zero - this is the fractional part + + let trunc = Self::from_signed_parts(sign, exponent, trunc_mantissa); + let unshifted_mantissa = mantissa.bitand(mask.not()); + let fract = if trunc_mantissa.eq(&mantissa) { + Self::ZERO + } else { + // let half = BUintD8::::power_of_two(Self::MB - small_exponent); + // let fract_type = match unshifted_mantissa.cmp(&BUintD8::::power_of_two(Self::MB - small_exponent)) { + // Ordering::Equal => FractType::AbsEqHalf, + // Ordering::Greater => FractType::AbsGtHalf, + // Ordering::Less => FractType::Other, + // }; + let shift = Self::MB + 1 - unshifted_mantissa.bits(); // amount of zeros before the first 1 in the fractional part 0.0...01... + // debug_assert!(shift > 0); + let fract_mantissa = unshifted_mantissa.shl(shift); + let abs_fract_exponent = BUintD8::::from_exp_type(shift - small_exponent).unwrap(); // absolute value of exponent of fractional part + let fract_exponent = abs_fract_exponent.cast_signed().neg(); + let fract = Self::from_signed_parts(sign, fract_exponent, fract_mantissa); + + fract + }; + (fract, if unshifted_mantissa.is_zero() { + true + } else { + false + }, trunc) + } + } else { + (Self::ZERO, true, self) + } + } + + #[doc = doc::rounding::trunc!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub const fn trunc(self) -> Self { + self.fract_trunc().1 + } + + #[doc = doc::rounding::fract!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub const fn fract(self) -> Self { + self.fract_trunc().0 + } +} + +#[cfg(test)] +mod tests { + use crate::test::test_bignum; + use crate::test::types::{ftest, FTEST}; + + test_bignum! { + function: ::ceil(f: ftest) + } + test_bignum! 
{ + function: ::floor(f: ftest) + } + test_bignum! { + function: ::round(f: ftest), + cases: [(-4376634.5), (8388609.0)] + } + test_bignum! { + function: ::trunc(f: ftest) + } + test_bignum! { + function: ::fract(f: ftest), + cases: [(1.5), (4.5), (16434.5)] + } +} \ No newline at end of file diff --git a/src/float/rounding.rs b/src/float/rounding.rs new file mode 100644 index 0000000..9c8c211 --- /dev/null +++ b/src/float/rounding.rs @@ -0,0 +1,281 @@ +use crate::float::UnsignedFloatExponent; +use crate::{BIntD8, BUintD8}; +use crate::doc; +use crate::ExpType; +use super::{Float, FloatExponent}; + +type Digit = u8; + +impl BUintD8 { + #[inline] + pub(crate) const fn to_exp_type(&self) -> Option { + let mut out = 0; + let mut i = 0; + if Digit::BITS > ExpType::BITS { + let small = self.digits[i] as ExpType; + let trunc = small as Digit; + if self.digits[i] != trunc { + return None; + } + out = small; + i = 1; + } else { + loop { + let shift = i << crate::digit::u8::BIT_SHIFT; // TODO: make sure to generalise when using general digits + if i >= W || shift >= ExpType::BITS as usize { + break; + } + out |= (self.digits[i] as ExpType) << shift; + i += 1; + } + } + + while i < W { + if self.digits[i] != 0 { + return None; + } + i += 1; + } + + Some(out) + } + + #[inline] + pub(crate) const fn from_exp_type(int: ExpType) -> Option { + let mut out = Self::ZERO; + let mut i = 0; + while i << crate::digit::u8::BIT_SHIFT < ExpType::BITS as usize { // TODO: make sure to generalise when using general digits + let d = (int >> (i << crate::digit::u8::BIT_SHIFT)) as Digit; // TODO: make sure to generalise when using general digits + if d != 0 { + if i < W { + out.digits[i] = d; + } else { + return None; + } + } + i += 1; + } + Some(out) + } +} + +#[doc = doc::rounding::impl_desc!()] +impl Float { + #[doc = doc::rounding::floor!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn floor(self) -> Self { + let (fract, trunc) = self.fract_trunc(); + if self.is_sign_positive() || fract.is_zero() { + trunc + } else { + trunc - Self::ONE + } + } + + #[inline] + pub fn floor2(self) -> Self { + let mut bits = self.to_bits(); + let e = self.signed_biased_exponent() - Self::EXP_BIAS; + + if e >= MB as FloatExponent { + return self; + } + if !e.is_negative() { + let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; + if (bits & m).is_zero() { + return self; + } + if self.is_sign_negative() { + bits += m; + } + bits &= !m; + } else if self.is_sign_positive() { + return Self::ZERO; + } else if !(bits << 1u8).is_zero() { + return Self::NEG_ONE; + } + Self::from_bits(bits) + } + + #[doc = doc::rounding::ceil!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn ceil(self) -> Self { + let (fract, trunc) = self.fract_trunc(); + if self.is_sign_negative() || fract.is_zero() { + trunc + } else { + trunc + Self::ONE + } + } + + #[inline] + pub fn ceil2(self) -> Self { + let mut u = self.to_bits(); + let e = self.signed_biased_exponent() - Self::EXP_BIAS; + + if e >= MB as FloatExponent { + return self; + } + if !e.is_negative() { + let m = (BUintD8::MAX >> (Self::BITS - Self::MB)) >> e; + if (u & m).is_zero() { + return self; + } + if self.is_sign_positive() { + u += m; + } + u &= !m; + } else if self.is_sign_negative() { + return Self::NEG_ZERO; + } else if !(u << 1u8).is_zero() { + return Self::ONE; + } + Self::from_bits(u) + } + + #[doc = doc::rounding::round!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub fn round(self) -> Self { + let (fract, trunc) = self.fract_trunc(); + if 
fract.abs() < Self::HALF { + trunc + } else if trunc.is_sign_negative() { + trunc - Self::ONE + } else { + trunc + Self::ONE + } + } + + #[inline] + pub fn round2(self) -> Self { + let a = Self::HALF - Self::QUARTER * Self::EPSILON; + if self.is_sign_positive() { + (self + a).trunc() + } else { + (self - a).trunc() + } + } + + #[inline] + pub fn round_ties_even(self) -> Self { + use core::cmp::Ordering; + + let (fract, trunc) = self.fract_trunc(); + match fract.abs().total_cmp(&Self::HALF) { + Ordering::Less => trunc, + Ordering::Greater => if trunc.is_sign_negative() { + trunc - Self::ONE + } else { + trunc + Self::ONE + }, + Ordering::Equal => { + let (_, exponent, mantissa) = trunc.into_signed_parts(); + let mantissa_length = (Self::MB - mantissa.trailing_zeros()) as FloatExponent; + debug_assert!(exponent >= mantissa_length); + let is_even = exponent > mantissa_length; + if is_even { + trunc + } else if trunc.is_sign_negative() { + trunc - Self::ONE + } else { + trunc + Self::ONE + } + }, + } + } + + #[inline] + pub const fn fract_trunc(self) -> (Self, Self) { + handle_nan!((self, self); self); + if self.is_infinite() { + return (Self::NAN, self); + } + if self.is_zero() { + return (Self::ZERO, self); + } + + let (sign, exponent, mantissa) = self.into_signed_parts(); + + if exponent.is_negative() { // exponent is negative, so absolute value must be < 1, so truncate to 0 + return if sign { + (self, Self::NEG_ZERO) + } else { + (self, Self::ZERO) + }; + } + // exponent is >= 0, so can take unsigned_abs without changing its value + + debug_assert!(!self.is_subnormal()); // exponent >= 0 so number should be normal, so mantissa has implicit leading one + + let abs_exponent = exponent.unsigned_abs(); + if UnsignedFloatExponent::BITS - abs_exponent.leading_zeros() <= ExpType::BITS { // if number of bits needed to store abs_exponent is less than bit width of ExpType, then can cast + let small_exponent = abs_exponent as ExpType; + if small_exponent >= Self::MB { // if the exponent exceeds the number of mantissa bits, then the number is an integer so truncation does nothing and fractional part is zero + (Self::ZERO, self) + } else { + let mask = BUintD8::::MAX.shl(Self::MB - small_exponent); + let trunc_mantissa = mantissa.bitand(mask); // set the last MB - exponent bits of the mantissa to zero - this is the fractional part + + let trunc = Self::from_signed_parts(sign, exponent, trunc_mantissa); + let fract = if trunc_mantissa.eq(&mantissa) { + Self::ZERO + } else { + let unshifted_mantissa = mantissa.bitand(mask.not()); + let shift = Self::MB + 1 - unshifted_mantissa.bits(); // amount of zeros before the first 1 in the fractional part 0.0...01... + // debug_assert!(shift > 0); + let fract_mantissa = unshifted_mantissa.shl(shift); + let abs_fract_exponent = (shift - small_exponent) as UnsignedFloatExponent; // absolute value of exponent of fractional part + let fract_exponent = -(abs_fract_exponent as FloatExponent); + let fract = Self::from_signed_parts(sign, fract_exponent, fract_mantissa); + + fract + }; + (fract, trunc) + } + } else { + (Self::ZERO, self) + } + } + + #[doc = doc::rounding::trunc!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub const fn trunc(self) -> Self { + self.fract_trunc().1 + } + + #[doc = doc::rounding::fract!(F)] + #[must_use = doc::must_use_op!(float)] + #[inline] + pub const fn fract(self) -> Self { + self.fract_trunc().0 + } +} + +#[cfg(test)] +mod tests { + use crate::test::test_bignum; + use crate::test::types::{ftest, FTEST}; + + test_bignum! 
{ + function: ::floor(f: ftest) + } + test_bignum! { + function: ::ceil(f: ftest) + } + test_bignum! { + function: ::round(f: ftest) + } + test_bignum! { + function: ::round_ties_even(f: ftest) + } + test_bignum! { + function: ::trunc(f: ftest) + } + test_bignum! { + function: ::fract(f: ftest) + } +} \ No newline at end of file diff --git a/src/helpers.rs b/src/helpers.rs new file mode 100644 index 0000000..1886c56 --- /dev/null +++ b/src/helpers.rs @@ -0,0 +1,106 @@ +use crate::ExpType; + +pub trait Bits { + const BITS: ExpType; + + fn bits(&self) -> ExpType; + fn bit(&self, index: ExpType) -> bool; +} + +macro_rules! impl_bits_for_uint { + ($($uint: ty), *) => { + $(impl Bits for $uint { + const BITS: ExpType = Self::BITS as ExpType; + + #[inline] + fn bits(&self) -> ExpType { + (Self::BITS - self.leading_zeros()) as ExpType + } + + #[inline] + fn bit(&self, index: ExpType) -> bool { + self & (1 << index) != 0 + } + })* + }; +} + +impl_bits_for_uint!(u8, u16, u32, u64, u128, usize); + +#[cfg(feature = "float")] +macro_rules! impl_bits_for_buint { + ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl crate::helpers::Bits for $BUint { + const BITS: ExpType = Self::BITS; + + #[inline] + fn bits(&self) -> ExpType { + Self::bits(&self) + } + + #[inline] + fn bit(&self, index: ExpType) -> bool { + Self::bit(&self, index) + } + } + }; +} + +#[cfg(feature = "float")] +crate::macro_impl!(impl_bits_for_buint); + +pub trait Zero: Sized + PartialEq { + const ZERO: Self; + + fn is_zero(&self) -> bool { + self == &Self::ZERO + } +} + +pub trait One: Sized + PartialEq { + const ONE: Self; + + fn is_one(&self) -> bool { + self == &Self::ONE + } +} + +macro_rules! impl_zero_for_uint { + ($($uint: ty), *) => { + $(impl Zero for $uint { + const ZERO: Self = 0; + })* + }; +} + +impl_zero_for_uint!(u8, u16, u32, u64, u128, usize); + +macro_rules! impl_one_for_int { + ($($uint: ty), *) => { + $(impl One for $uint { + const ONE: Self = 1; + })* + }; +} + +impl_one_for_int!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); + +macro_rules! impl_zero_for_buint { + ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl crate::helpers::Zero for $BUint { + const ZERO: Self = Self::ZERO; + } + }; +} + +crate::macro_impl!(impl_zero_for_buint); + +macro_rules! impl_one_for_buint { + ($BUint: ident, $BInt: ident, $Digit: ident) => { + impl crate::helpers::One for $BUint { + const ONE: Self = Self::ONE; + } + }; +} + +crate::macro_impl!(impl_one_for_buint); diff --git a/src/int/numtraits.rs b/src/int/numtraits.rs index e6d885e..d2645c0 100644 --- a/src/int/numtraits.rs +++ b/src/int/numtraits.rs @@ -46,7 +46,7 @@ macro_rules! as_bigint_impl { pub(crate) use as_bigint_impl; macro_rules! impls { - ($Int: ident, $BUint: ident, $BInt: ident) => { + ($Int: ident, $BUint: ident, $BInt: ident, $Digit: ident) => { //crate::nightly::impl_const! { impl Bounded for $Int { #[inline] @@ -200,6 +200,48 @@ macro_rules! 
         }
         //}
+
+        // #[cfg(feature = "nightly")]
+        // #[doc = crate::doc::requires_feature!("nightly")]
+        // impl FromBytes for $Int<N> {
+        //     type Bytes = [u8; $BUint::<N>::BYTES_USIZE];
+
+        //     #[inline]
+        //     fn from_be_bytes(&self) -> Self::BYTES {
+        //         Self::to_be_bytes(*self)
+        //     }
+
+        //     #[inline]
+        //     fn from_le_bytes(&self) -> Self::BYTES {
+        //         Self::to_le_bytes(*self)
+        //     }
+
+        //     #[inline]
+        //     fn from_ne_bytes(&self) -> Self::BYTES {
+        //         Self::to_ne_bytes(*self)
+        //     }
+        // }
+
+        // #[cfg(feature = "nightly")]
+        // #[doc = crate::doc::requires_feature!("nightly")]
+        // impl ToBytes for $Int<N> {
+        //     type Bytes = [u8; $BUint::<N>::BYTES_USIZE];
+
+        //     #[inline]
+        //     fn to_be_bytes(&self) -> Self::BYTES {
+        //         Self::to_be_bytes(*self)
+        //     }
+
+        //     #[inline]
+        //     fn to_le_bytes(&self) -> Self::BYTES {
+        //         Self::to_le_bytes(*self)
+        //     }
+
+        //     #[inline]
+        //     fn to_ne_bytes(&self) -> Self::BYTES {
+        //         Self::to_ne_bytes(*self)
+        //     }
+        // }
+
         //crate::nightly::impl_const! {
         impl<const N: usize> MulAdd for $Int<N> {
             type Output = Self;
@@ -246,9 +288,13 @@ macro_rules! impls {
 
             #[inline]
             fn is_one(&self) -> bool {
-                core::cmp::PartialEq::eq(self, &Self::ONE)
+                Self::is_one(&self)
             }
         }
+
+        impl<const N: usize> ConstOne for $Int<N> {
+            const ONE: Self = Self::ONE;
+        }
 
         //}
         //crate::nightly::impl_const! {
@@ -260,9 +306,13 @@ macro_rules! impls {
 
             #[inline]
             fn is_zero(&self) -> bool {
-                Self::is_zero(self)
+                Self::is_zero(&self)
             }
         }
+
+        impl<const N: usize> ConstZero for $Int<N> {
+            const ZERO: Self = Self::ZERO;
+        }
         //}
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index 3204455..79b1742 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,6 @@
         generic_const_exprs,
         const_trait_impl,
         const_option,
-        // effects,
     )
 )]
 #![cfg_attr(
@@ -37,6 +36,7 @@ mod digit;
 mod doc;
 pub mod errors;
 mod int;
+mod helpers;
 mod nightly;
 
 pub mod prelude;
@@ -45,11 +45,11 @@ pub mod random;
 
 pub mod types;
 
-// #[cfg(feature = "nightly")]
-// mod float;
+#[cfg(feature = "float")]
+mod float;
 
-// #[cfg(feature = "nightly")]
-// pub use float::Float;
+#[cfg(feature = "float")]
+pub use float::Float;
 
 #[cfg(test)]
 mod test;
diff --git a/src/test/convert.rs b/src/test/convert.rs
index 697b1c9..d13b5be 100644
--- a/src/test/convert.rs
+++ b/src/test/convert.rs
@@ -77,29 +77,29 @@ impl TestConvert for f32 {
     }
 }
 
-// #[cfg(feature = "nightly")]
-// impl TestConvert for crate::float::F64 {
-//     type Output = u64;
+#[cfg(feature = "float")]
+impl TestConvert for crate::float::F64 {
+    type Output = u64;
 
-//     #[inline]
-//     fn into(self) -> Self::Output {
-//         use crate::cast::As;
+    #[inline]
+    fn into(self) -> Self::Output {
+        use crate::cast::As;
 
-//         self.to_bits().as_()
-//     }
-// }
+        self.to_bits().as_()
+    }
+}
 
-// #[cfg(feature = "nightly")]
-// impl TestConvert for crate::float::F32 {
-//     type Output = u32;
+#[cfg(feature = "float")]
+impl TestConvert for crate::float::F32 {
+    type Output = u32;
 
-//     #[inline]
-//     fn into(self) -> Self::Output {
-//         use crate::cast::As;
+    #[inline]
+    fn into(self) -> Self::Output {
+        use crate::cast::As;
 
-//         self.to_bits().as_()
-//     }
-// }
+        self.to_bits().as_()
+    }
+}
 
 impl<T: TestConvert, U: TestConvert> TestConvert for (T, U) {
     type Output = (<T as TestConvert>::Output, <U as TestConvert>::Output);
diff --git a/src/test/mod.rs b/src/test/mod.rs
index 042994a..a27df84 100644
--- a/src/test/mod.rs
+++ b/src/test/mod.rs
@@ -31,6 +31,8 @@ impl<const N: usize> Arbitrary for U8ArrayWrapper<N> {
 
 use core::fmt::{self, Debug, Formatter};
 
+use crate::BUintD8;
+
 impl<const N: usize> Debug for U8ArrayWrapper<N> {
     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
         self.0.fmt(f)
diff --git a/src/test/types.rs b/src/test/types.rs
index 213e0a3..2c01469 100644
--- a/src/test/types.rs
+++ b/src/test/types.rs
@@ -48,18 +48,18 @@ mod small_types {
 pub use core::primitive::*;
 pub use small_types::*;
 
-// #[cfg(test_int_bits = "64")]
-// #[allow(non_camel_case_types)]
-// pub type ftest = f64;
+#[cfg(test_int_bits = "64")]
+#[allow(non_camel_case_types)]
+pub type ftest = f64;
 
-// #[cfg(not(test_int_bits = "64"))]
-// #[allow(non_camel_case_types)]
-// pub type ftest = f32;
+#[cfg(not(test_int_bits = "64"))]
+#[allow(non_camel_case_types)]
+pub type ftest = f32;
 
-// #[cfg(feature = "nightly")]
-// #[cfg(test_int_bits = "64")]
-// pub type FTEST = crate::float::Float<8, 52>;
+#[cfg(feature = "float")]
+#[cfg(test_int_bits = "64")]
+pub type FTEST = crate::float::Float<8, 52>;
 
-// #[cfg(feature = "nightly")]
-// #[cfg(not(test_int_bits = "64"))]
-// pub type FTEST = crate::float::Float<4, 23>;
\ No newline at end of file
+#[cfg(feature = "float")]
+#[cfg(not(test_int_bits = "64"))]
+pub type FTEST = crate::float::Float<4, 23>;
\ No newline at end of file
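
A standalone sketch of the truncation-by-mantissa-masking idea used by `fract_trunc` in src/float/rounding.rs, written against primitive `f64` (1 sign bit, 11 exponent bits, 52 stored mantissa bits) so it does not depend on this crate's internal `into_signed_parts`/`from_signed_parts` helpers; `trunc_by_masking` is an invented name used for illustration only:

    fn trunc_by_masking(x: f64) -> f64 {
        const MB: u32 = 52; // stored mantissa bits of f64
        let bits = x.to_bits();
        let exponent = ((bits >> MB) & 0x7ff) as i64 - 1023; // unbiased exponent
        if exponent < 0 {
            // |x| < 1 (zero and subnormals included): truncation leaves only a signed zero
            return f64::from_bits(bits & (1u64 << 63));
        }
        if exponent >= MB as i64 {
            // every remaining mantissa bit has weight >= 1, so x is already an integer
            // (infinities and NaNs also take this branch and pass through unchanged)
            return x;
        }
        // zero the low MB - exponent mantissa bits: they hold the fractional part
        let mask = u64::MAX << (MB as i64 - exponent) as u32;
        f64::from_bits(bits & mask)
    }

    fn main() {
        assert_eq!(trunc_by_masking(2.75), 2.0);
        assert_eq!(trunc_by_masking(-3.5), -3.0);
        assert_eq!(trunc_by_masking(123456.0), 123456.0);
        assert_eq!(trunc_by_masking(-0.25), -0.0);
    }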
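
The tie (`Ordering::Equal`) branch of `round_ties_even` decides the parity of `trunc` from its exponent and the position of the lowest set significand bit. The sketch below mirrors that check on primitive `f64`, under the assumption (suggested by the surrounding code) that `into_signed_parts` yields the significand with its implicit leading one; `integer_f64_is_even` is an invented name:

    // x is assumed to be a nonzero, normal, integer-valued f64
    fn integer_f64_is_even(x: f64) -> bool {
        const MB: u32 = 52;
        let bits = x.to_bits();
        let e = ((bits >> MB) & 0x7ff) as i64 - 1023; // unbiased exponent
        let significand = (bits & ((1u64 << MB) - 1)) | (1u64 << MB); // implicit leading one included
        // the lowest set significand bit has weight 2^(e - MB + trailing_zeros),
        // so x is even exactly when that weight exceeds 2^0
        e - MB as i64 + significand.trailing_zeros() as i64 > 0
    }

    fn main() {
        assert!(integer_f64_is_even(2.0));
        assert!(!integer_f64_is_even(3.0));
        assert!(!integer_f64_is_even(1.0));
        assert!(integer_f64_is_even(-6.0));
    }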
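
The `ConstOne`/`ConstZero` impls added in src/int/numtraits.rs hook into the associated constants introduced in num-traits 0.2.19. A rough sketch of the kind of generic code this enables downstream; `sum_plus_one` is an invented helper, and the `bnum::types::U256` mention in the comment is assumed rather than exercised here:

    use num_traits::{ConstOne, ConstZero};

    // works for any type with compile-time ZERO/ONE constants;
    // the ConstZero: Zero supertrait already provides Add<Output = Self>
    fn sum_plus_one<T: ConstZero + ConstOne + Copy>(xs: &[T]) -> T {
        xs.iter().copied().fold(T::ZERO, |acc, x| acc + x) + T::ONE
    }

    fn main() {
        assert_eq!(sum_plus_one(&[1u32, 2, 3]), 7);
        // with this patch applied, bnum's fixed-size integers satisfy the same
        // bounds, e.g. T = bnum::types::U256 (assumed, not verified here).
    }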