From db5c37c2fb53d36909ab0e82f988a0709a8cc0a6 Mon Sep 17 00:00:00 2001
From: Quaternions
Date: Wed, 11 Sep 2024 13:59:33 -0700
Subject: [PATCH] implement 'fix' function that changes the fixed point

---
 fixed_wide/src/fixed.rs  | 126 ++++++++++++++++++++++++++++++++-------
 fixed_wide/src/tests.rs  |   5 ++
 fixed_wide/src/zeroes.rs |   7 +--
 3 files changed, 111 insertions(+), 27 deletions(-)

diff --git a/fixed_wide/src/fixed.rs b/fixed_wide/src/fixed.rs
index 6a86be8..91d4eae 100644
--- a/fixed_wide/src/fixed.rs
+++ b/fixed_wide/src/fixed.rs
@@ -380,6 +380,7 @@ impl_shift_operator!( Fixed, Shr, shr, Self );
 
 // wide operators. The result width is the sum of the input widths, i.e. none of the multiplication operations can overflow.
+#[allow(unused_macros)]
 macro_rules! impl_wide_operators{
 	($lhs:expr,$rhs:expr)=>{
 		impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
@@ -467,21 +468,101 @@ macro_repeated!(
 	(1,14),(2,14),
 	(1,15)
 );
-impl<const N:usize,const F:usize> Fixed<N,F>{
-	#[inline]
-	pub fn resize_into<const M:usize>(self)->Fixed<M,F>{
-		Fixed::from_bits(self.bits.as_::<BInt<M>>())
-	}
-}
+
+macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
+	(
+		(),
+		($lhs:expr,$rhs:expr)
+	)=>{
+		impl Fixed<$lhs,{$lhs*32}>
+		{
+			paste::item!{
+				#[inline]
+				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+					Fixed::from_bits(bnum::cast::As::as_::<BInt<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
+				}
+			}
+		}
+	}
+}
+macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
+	(
+		(),
+		($lhs:expr,$rhs:expr)
+	)=>{
+		impl Fixed<$lhs,{$lhs*32}>
+		{
+			paste::item!{
+				#[inline]
+				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+					Fixed::from_bits(bnum::cast::As::as_::<BInt<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
+				}
+			}
+		}
+	}
+}
+macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
+	(
+		(),
+		($lhs:expr,$rhs:expr)
+	)=>{
+		impl Fixed<$lhs,{$lhs*32}>
+		{
+			paste::item!{
+				#[inline]
+				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+					self
+				}
+			}
+		}
+	}
+}
+
+// I LOVE NOT BEING ABLE TO USE CONST GENERICS
+
+macro_repeated!(
+	impl_fix_rhs_lt_lhs_not_const_generic,(),
+	(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
+	(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
+	(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
+	(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
+	(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
+	(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
+	(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
+	(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
+	(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
+	(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
+	(12,11),(13,11),(14,11),(15,11),(16,11),
+	(13,12),(14,12),(15,12),(16,12),
+	(14,13),(15,13),(16,13),
+	(15,14),(16,14),
+	(16,15)
+);
+macro_repeated!(
+	impl_fix_lhs_lt_rhs_not_const_generic,(),
+	(1,2),
+	(1,3),(2,3),
+	(1,4),(2,4),(3,4),
+	(1,5),(2,5),(3,5),(4,5),
+	(1,6),(2,6),(3,6),(4,6),(5,6),
+	(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
+	(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
+	(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
+	(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
+	(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
+	(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
+	(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
+	(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
+	(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
+	(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
+);
+macro_repeated!(
+	impl_fix_lhs_eq_rhs_not_const_generic,(),
+	(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
+);
+
 macro_rules! impl_not_const_generic{
-	($n:expr)=>{
-		impl Fixed<{$n*2},{$n*2*32}>{
-			#[inline]
-			pub fn halve_precision(self)->Fixed<$n,{$n*32}>{
-				Fixed::from_bits(bnum::cast::As::as_(self.bits.shr($n*32)))
-			}
-		}
+	($n:expr,$_2n:expr)=>{
 		impl Fixed<$n,{$n*32}>{
 			paste::item!{
 				#[inline]
@@ -496,13 +577,12 @@ macro_rules! impl_not_const_generic{
 				let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
 				let mut result=Self::ZERO;
 
-				//multiply by one to make the types match (hack)
-				//TODO: use resize method
-				let wide_self:<Self as core::ops::Mul>::Output=self*Self::ONE;
+				//resize self to match the wide mul output
+				let wide_self=self.[<fix_ $_2n>]();
 				//descend down the bits and check if flipping each bit would push the square over the input value
 				for shift in (0..=max_shift).rev(){
 					let new_result=result|Self::from_bits(BInt::from_bits(bnum::BUint::power_of_two(shift)));
-					if new_result*new_result<=wide_self{
+					if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
 						result=new_result;
 					}
 				}
@@ -528,11 +608,11 @@ macro_rules! impl_not_const_generic{
 		}
 	}
 }
-impl_not_const_generic!(1);
-impl_not_const_generic!(2);
-impl_not_const_generic!(3);
-impl_not_const_generic!(4);
-impl_not_const_generic!(5);
-impl_not_const_generic!(6);
-impl_not_const_generic!(7);
-impl_not_const_generic!(8);
+impl_not_const_generic!(1,2);
+impl_not_const_generic!(2,4);
+impl_not_const_generic!(3,6);
+impl_not_const_generic!(4,8);
+impl_not_const_generic!(5,10);
+impl_not_const_generic!(6,12);
+impl_not_const_generic!(7,14);
+impl_not_const_generic!(8,16);
diff --git a/fixed_wide/src/tests.rs b/fixed_wide/src/tests.rs
index 2f9aaa6..f7278ce 100644
--- a/fixed_wide/src/tests.rs
+++ b/fixed_wide/src/tests.rs
@@ -52,6 +52,11 @@ fn test_bint(){
 	assert_eq!(a*2,I32F32::from(2));
 }
 
+#[test]
+fn test_fix(){
+	let a=I32F32::ONE;
+	assert_eq!(a.fix_8(),I256F256::ONE);
+}
 #[test]
 fn test_sqrt(){
 	let a=I32F32::ONE*4;
diff --git a/fixed_wide/src/zeroes.rs b/fixed_wide/src/zeroes.rs
index 3634a02..621e22e 100644
--- a/fixed_wide/src/zeroes.rs
+++ b/fixed_wide/src/zeroes.rs
@@ -12,13 +12,12 @@ macro_rules! impl_zeroes{
 				Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1).into_iter()),
 				Ordering::Less=>true,
 			};
-			paste::item!{
 				let radicand=a1*a1-a2*a0*4;
-			}
 			match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
 				Ordering::Greater=>{
-					//TODO: use resize method
-					let planar_radicand:Self=radicand.sqrt().halve_precision();
+					paste::item!{
+						let planar_radicand=radicand.sqrt().[<fix_ $n>]();
+					}
 					//sort roots ascending and avoid taking the difference of large numbers
 					let zeroes=match (a2pos,Self::ZERO<planar_radicand){
 						(true, true )=>[(-a1-planar_radicand)/(a2*2),(a0*2)/(-a1-planar_radicand)],
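
A minimal usage sketch of the generated fix_N methods, based on test_fix above.
The fixed_wide::types import path and the fix_1 round trip are assumptions for
illustration, not part of the patch:

    use fixed_wide::types::{I32F32,I256F256}; // assumed re-export location

    let a=I32F32::ONE;
    // widening fix: sign-extends the raw bits up to BInt<8>, then shifts left
    // by (8-1)*32 bits, moving the fixed point from 32 to 256 fractional bits
    let wide:I256F256=a.fix_8();
    assert_eq!(wide,I256F256::ONE);
    // narrowing fix: shifts right by (8-1)*32 bits, then truncates to BInt<1>
    let narrow:I32F32=wide.fix_1();
    assert_eq!(narrow,I32F32::ONE);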