// strafe-project/fixed_wide/src/fixed.rs

use bnum::{BInt,cast::As};
#[derive(Clone,Copy,Debug,Default,Hash)]
/// A fixed-point number for which multiply operations widen the bits in the output (when the wide-mul feature is enabled).
/// N is the number of u64s to use
/// F is the number of fractional bits (always N*32 lol)
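/// For example, `Fixed<1,32>` stores one u64 with 32 fractional bits (an I32F32-style
/// value), and a wide multiply of two such values produces a `Fixed<2,64>`.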
pub struct Fixed<const N:usize,const F:usize>{
pub(crate)bits:BInt<{N}>,
}
impl<const N:usize,const F:usize> Fixed<N,F>{
pub const MAX:Self=Self::from_bits(BInt::<N>::MAX);
pub const MIN:Self=Self::from_bits(BInt::<N>::MIN);
pub const ZERO:Self=Self::from_bits(BInt::<N>::ZERO);
pub const EPSILON:Self=Self::from_bits(BInt::<N>::ONE);
pub const NEG_EPSILON:Self=Self::from_bits(BInt::<N>::NEG_ONE);
pub const ONE:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32));
pub const TWO:Self=Self::from_bits(BInt::<N>::TWO.shl(F as u32));
pub const HALF:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32-1));
pub const NEG_ONE:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32));
pub const NEG_TWO:Self=Self::from_bits(BInt::<N>::NEG_TWO.shl(F as u32));
pub const NEG_HALF:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32-1));
}
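// Illustrative sanity check for the constants above. The concrete width Fixed<1,32>
// is chosen purely for demonstration; this sketch is not part of the original API.
#[cfg(test)]
mod constant_examples{
use super::*;
#[test]
fn one_is_one_shifted_by_f(){
//ONE is the integer 1 shifted left by the F fractional bits; HALF sits one bit lower
assert_eq!(Fixed::<1,32>::ONE.to_bits(),BInt::<1>::ONE.shl(32));
assert_eq!(Fixed::<1,32>::HALF.to_bits(),BInt::<1>::ONE.shl(31));
}
}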
impl<const N:usize,const F:usize> Fixed<N,F>{
#[inline]
pub const fn from_bits(bits:BInt::<N>)->Self{
Self{
bits,
}
}
#[inline]
pub const fn to_bits(self)->BInt<N>{
self.bits
}
#[inline]
pub const fn raw_digit(value:i64)->Self{
let mut digits=[0u64;N];
digits[0]=value.abs() as u64;
//sign bit
digits[N-1]|=(value&i64::MIN) as u64;
Self::from_bits(BInt::from_bits(bnum::BUint::from_digits(digits)))
}
#[inline]
pub const fn is_zero(self)->bool{
self.bits.is_zero()
}
#[inline]
pub const fn is_negative(self)->bool{
self.bits.is_negative()
}
#[inline]
pub const fn is_positive(self)->bool{
self.bits.is_positive()
}
#[inline]
pub const fn abs(self)->Self{
Self::from_bits(self.bits.abs())
}
}
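// Illustrative sketch of raw_digit: the value lands in the lowest 64-bit digit and the
// sign bit is copied into the top digit, so small positive counts are multiples of EPSILON.
// The width Fixed<2,64> is an arbitrary choice for demonstration.
#[cfg(test)]
mod raw_digit_examples{
use super::*;
#[test]
fn small_raw_digit_values(){
assert_eq!(Fixed::<2,64>::raw_digit(5),Fixed::<2,64>::EPSILON*5i64);
assert!(Fixed::<2,64>::raw_digit(-5).is_negative());
}
}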
impl<const F:usize> Fixed<1,F>{
/// My old code called this function everywhere so let's provide it
#[inline]
pub const fn raw(value:i64)->Self{
Self::from_bits(BInt::from_bits(bnum::BUint::from_digit(value as u64)))
}
#[inline]
pub const fn to_raw(self)->i64{
let &[digit]=self.to_bits().to_bits().digits();
digit as i64
}
}
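// Illustrative sketch: raw()/to_raw() pass the underlying i64 bit pattern straight through,
// so 1<<32 raw units is exactly ONE when F is 32.
#[cfg(test)]
mod raw_examples{
use super::*;
#[test]
fn raw_round_trip(){
let x=Fixed::<1,32>::raw(1i64<<32);
assert_eq!(x,Fixed::<1,32>::ONE);
assert_eq!(x.to_raw(),1i64<<32);
}
}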
impl<const N:usize,const F:usize,T> From<T> for Fixed<N,F>
where
BInt<N>:From<T>
{
#[inline]
fn from(value:T)->Self{
Self::from_bits(BInt::<{N}>::from(value)<<F as u32)
}
}
impl<const N:usize,const F:usize> PartialEq for Fixed<N,F>{
#[inline]
fn eq(&self,other:&Self)->bool{
self.bits.eq(&other.bits)
}
}
impl<const N:usize,const F:usize,T> PartialEq<T> for Fixed<N,F>
where
T:Copy,
BInt::<N>:From<T>,
{
#[inline]
fn eq(&self,&other:&T)->bool{
self.bits.eq(&other.into())
}
}
impl<const N:usize,const F:usize> Eq for Fixed<N,F>{}
impl<const N:usize,const F:usize> PartialOrd for Fixed<N,F>{
#[inline]
fn partial_cmp(&self,other:&Self)->Option<std::cmp::Ordering>{
self.bits.partial_cmp(&other.bits)
}
}
impl<const N:usize,const F:usize,T> PartialOrd<T> for Fixed<N,F>
where
T:Copy,
BInt::<N>:From<T>,
{
#[inline]
fn partial_cmp(&self,&other:&T)->Option<std::cmp::Ordering>{
self.bits.partial_cmp(&other.into())
}
}
impl<const N:usize,const F:usize> Ord for Fixed<N,F>{
#[inline]
fn cmp(&self,other:&Self)->std::cmp::Ordering{
self.bits.cmp(&other.bits)
}
}
impl<const N:usize,const F:usize> std::ops::Neg for Fixed<N,F>{
type Output=Self;
#[inline]
fn neg(self)->Self{
Self::from_bits(self.bits.neg())
}
}
impl<const N:usize,const F:usize> std::iter::Sum for Fixed<N,F>{
#[inline]
fn sum<I:Iterator<Item=Self>>(iter:I)->Self{
let mut sum=Self::ZERO;
for elem in iter{
sum+=elem;
}
sum
}
}
const fn signed_shift(lhs:u64,rhs:i32)->u64{
if rhs.is_negative(){
lhs>>-rhs
}else{
lhs<<rhs
}
}
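// Illustrative sketch of signed_shift: a non-negative rhs shifts left, a negative rhs shifts right.
#[cfg(test)]
mod signed_shift_examples{
#[test]
fn shifts_both_ways(){
assert_eq!(super::signed_shift(0b100,1),0b1000);
assert_eq!(super::signed_shift(0b100,-1),0b10);
}
}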
macro_rules! impl_into_float {
( $output: ty, $unsigned:ty, $exponent_bits:expr, $mantissa_bits:expr ) => {
impl<const N:usize,const F:usize> Into<$output> for Fixed<N,F>{
#[inline]
fn into(self)->$output{
const DIGIT_SHIFT:u32=6;//Log2[64]
// SBBB BBBB
// 1001 1110 0000 0000
let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
let unsigned=self.bits.unsigned_abs();
let most_significant_bit=unsigned.bits();
let exp=if unsigned.is_zero(){
0
}else{
let msb=most_significant_bit as $unsigned;
let _127=((1 as $unsigned)<<($exponent_bits-1))-1;
let msb_offset=msb+_127-1-F as $unsigned;
msb_offset<<($mantissa_bits-1)
};
let digits=unsigned.digits();
let digit_index=most_significant_bit>>DIGIT_SHIFT;
let digit=digits[digit_index as usize];
//How many bits does the mantissa take from this digit
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let mut unmasked_mant=signed_shift(digit,rest_of_mantissa) as $unsigned;
if 0<rest_of_mantissa&&digit_index!=0{
//take the next digit down and shove some of its bits onto the bottom of the mantissa
let digit=digits[digit_index as usize-1];
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let unmasked_mant2=signed_shift(digit,rest_of_mantissa) as $unsigned;
unmasked_mant|=unmasked_mant2;
}
let mant=unmasked_mant&((1 as $unsigned)<<($mantissa_bits-1))-1;
let bits=sign|exp|mant;
<$output>::from_bits(bits)
}
}
}
}
impl_into_float!(f32,u32,8,24);
impl_into_float!(f64,u64,11,53);
impl<const N:usize,const F:usize> core::fmt::Display for Fixed<N,F>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
let float:f32=(*self).into();
core::write!(f,"{:.3}",float)
}
}
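// Illustrative sketch of the Into<f32>/Into<f64> conversions above: values with short
// fractions convert exactly since sign, exponent and mantissa are rebuilt directly.
#[cfg(test)]
mod float_conversion_examples{
use super::*;
#[test]
fn exact_conversions(){
let two:f32=Fixed::<1,32>::TWO.into();
assert_eq!(two,2.0);
let neg_half:f64=Fixed::<1,32>::NEG_HALF.into();
assert_eq!(neg_half,-0.5);
}
}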
macro_rules! impl_additive_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> $struct<N,F>{
#[inline]
pub const fn $method(self, other: Self) -> Self {
Self::from_bits(self.bits.$method(other.bits))
}
}
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
self.$method(other)
}
}
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>,
{
type Output = $output;
#[inline]
fn $method(self, other: U) -> Self::Output {
Self::from_bits(self.bits.$method(BInt::<N>::from(other).shl(F as u32)))
}
}
};
}
macro_rules! impl_additive_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
#[inline]
fn $method(&mut self, other: Self) {
self.bits.$method(other.bits);
}
}
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>,
{
#[inline]
fn $method(&mut self, other: U) {
self.bits.$method(BInt::<N>::from(other).shl(F as u32));
}
}
};
}
// Impl arithmetic operators
impl_additive_assign_operator!( Fixed, AddAssign, add_assign );
impl_additive_operator!( Fixed, Add, add, Self );
impl_additive_assign_operator!( Fixed, SubAssign, sub_assign );
impl_additive_operator!( Fixed, Sub, sub, Self );
impl_additive_assign_operator!( Fixed, RemAssign, rem_assign );
impl_additive_operator!( Fixed, Rem, rem, Self );
// Impl bitwise operators
impl_additive_assign_operator!( Fixed, BitAndAssign, bitand_assign );
impl_additive_operator!( Fixed, BitAnd, bitand, Self );
impl_additive_assign_operator!( Fixed, BitOrAssign, bitor_assign );
impl_additive_operator!( Fixed, BitOr, bitor, Self );
impl_additive_assign_operator!( Fixed, BitXorAssign, bitxor_assign );
impl_additive_operator!( Fixed, BitXor, bitxor, Self );
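// Illustrative sketch: the scalar overloads above shift the integer operand up by F bits
// first, so adding 1i64 adds one whole unit rather than one EPSILON.
#[cfg(test)]
mod additive_examples{
use super::*;
#[test]
fn add_integer_is_scaled(){
let one=Fixed::<1,32>::ONE;
assert_eq!(one+1i64,Fixed::<1,32>::TWO);
let mut x=Fixed::<1,32>::HALF;
x+=Fixed::<1,32>::HALF;
assert_eq!(x,Fixed::<1,32>::ONE);
}
}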
// non-wide operators. The result is the same width as the inputs.
// This macro is not used in the default configuration.
#[allow(unused_macros)]
macro_rules! impl_multiplicative_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
paste::item!{
self.[<fixed_ $method>](other)
}
}
}
};
}
macro_rules! impl_multiplicative_assign_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $non_assign_method: ident ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
#[inline]
fn $method(&mut self, other: Self) {
paste::item!{
*self=self.[<fixed_ $non_assign_method>](other);
}
}
}
};
}
macro_rules! impl_multiply_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self, rhs: Self) -> Self {
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Self::from_bits(out.shr(F as u32).as_())
}else{
-Self::from_bits(out.shr(F as u32).as_())
}
}
}
}
#[cfg(not(feature="wide-mul"))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<i64> for Fixed<$width,{$width*32}>{
type Output=Self;
#[inline]
fn divide(self, other: i64)->Self::Output{
Self::from_bits(self.bits/BInt::from(other))
}
}
}
}
macro_rules! impl_divide_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self, other: Self) -> Self {
//this only needs to be $width+F as u32/64+1 but MUH CONST GENERICS!!!!!
let lhs=self.bits.as_::<BInt::<{$width*2}>>().shl(F as u32);
let rhs=other.bits.as_::<BInt::<{$width*2}>>();
Self::from_bits(lhs.div(rhs).as_())
}
}
}
#[cfg(all(not(feature="wide-mul"),not(feature="deferred-division")))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(all(not(feature="wide-mul"),feature="deferred-division"))]
impl<const F:usize> ratio_ops::ratio::Divide for $struct<$width,F>{
type Output = $output;
#[inline]
fn divide(self, other: Self) -> Self::Output {
paste::item!{
self.[<fixed_ $method>](other)
}
}
}
};
}
macro_rules! impl_multiplicative_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
{
type Output = $output;
#[inline]
fn $method(self, other: U) -> Self::Output {
Self::from_bits(self.bits.$method(BInt::<N>::from(other)))
}
}
};
}
macro_rules! impl_multiplicative_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
{
#[inline]
fn $method(&mut self, other: U) {
self.bits.$method(BInt::<N>::from(other));
}
}
};
}
macro_rules! macro_repeated{
(
$macro:ident,
$any:tt,
$($repeated:tt),*
)=>{
$(
$macro!($any, $repeated);
)*
};
}
macro_rules! macro_16 {
( $macro: ident, $any:tt ) => {
macro_repeated!($macro,$any,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
}
}
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, MulAssign, mul_assign, mul) );
macro_16!( impl_multiply_operator_not_const_generic, (Fixed, Mul, mul, Self) );
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, DivAssign, div_assign, div) );
macro_16!( impl_divide_operator_not_const_generic, (Fixed, Div, div, Self) );
impl_multiplicative_assign_operator!( Fixed, MulAssign, mul_assign );
impl_multiplicative_operator!( Fixed, Mul, mul, Self );
impl_multiplicative_assign_operator!( Fixed, DivAssign, div_assign );
impl_multiplicative_operator!( Fixed, Div, div, Self );
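// Illustrative sketch of the same-width fixed_mul/fixed_div generated above: the double-wide
// intermediate is shifted back down, so HALF*HALF is one quarter and ONE/TWO is HALF.
#[cfg(test)]
mod narrow_mul_div_examples{
use super::*;
#[test]
fn same_width_mul_and_div(){
let half=Fixed::<1,32>::HALF;
assert_eq!(half.fixed_mul(half),Fixed::<1,32>::from_bits(BInt::<1>::ONE.shl(30)));
assert_eq!(Fixed::<1,32>::ONE.fixed_div(Fixed::<1,32>::TWO),half);
}
}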
#[cfg(feature="deferred-division")]
impl<const LHS_N:usize,const LHS_F:usize,const RHS_N:usize,const RHS_F:usize> core::ops::Div<Fixed<RHS_N,RHS_F>> for Fixed<LHS_N,LHS_F>{
type Output=ratio_ops::ratio::Ratio<Fixed<LHS_N,LHS_F>,Fixed<RHS_N,RHS_F>>;
#[inline]
fn div(self, other: Fixed<RHS_N,RHS_F>)->Self::Output{
ratio_ops::ratio::Ratio::new(self,other)
}
}
#[cfg(feature="deferred-division")]
impl<const N:usize,const F:usize> ratio_ops::ratio::Parity for Fixed<N,F>{
fn parity(&self)->bool{
self.is_negative()
}
}
macro_rules! impl_shift_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: u32) -> Self::Output {
Self::from_bits(self.bits.$method(other))
}
}
};
}
macro_rules! impl_shift_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
#[inline]
fn $method(&mut self, other: u32) {
self.bits.$method(other);
}
}
};
}
impl_shift_assign_operator!( Fixed, ShlAssign, shl_assign );
impl_shift_operator!( Fixed, Shl, shl, Self );
impl_shift_assign_operator!( Fixed, ShrAssign, shr_assign );
impl_shift_operator!( Fixed, Shr, shr, Self );
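// Illustrative sketch: the shift operators act on the raw bits, so shifting left by one
// doubles the value and shifting right by one halves it.
#[cfg(test)]
mod shift_examples{
use super::*;
#[test]
fn shl_doubles(){
assert_eq!(Fixed::<1,32>::ONE<<1u32,Fixed::<1,32>::TWO);
assert_eq!(Fixed::<1,32>::TWO>>1u32,Fixed::<1,32>::ONE);
}
}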
// wide operators. The result width is the sum of the input widths, i.e. the multiplication can neither overflow nor lose precision.
#[allow(unused_macros)]
macro_rules! impl_wide_operators{
($lhs:expr,$rhs:expr)=>{
impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn mul(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_mul_ $lhs _ $rhs>](other)
}
}
}
#[cfg(not(feature="deferred-division"))]
impl core::ops::Div<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn div(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn divide(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
}
}
// WIDE MUL: multiply into a wider type
// let a = I32F32::ONE;
// let b:I64F64 = a.wide_mul(a);
macro_rules! impl_wide_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>();
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
Fixed::from_bits(lhs*rhs)
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>().shl($rhs*64);
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
Fixed::from_bits(lhs/rhs)
}
}
}
#[cfg(feature="wide-mul")]
impl_wide_operators!($lhs,$rhs);
};
}
macro_rules! impl_wide_same_size_not_const_generic{
(
(),
$width:expr
)=>{
impl Fixed<$width,{$width*32}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Fixed::from_bits(out)
}else{
// Normal neg is the cheapest negation operation
// And the inputs cannot reach the point where it matters
Fixed::from_bits(out.neg())
}
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$width*2}>>().shl($width*64);
let rhs=rhs.bits.as_::<BInt<{$width*2}>>();
Fixed::from_bits(lhs/rhs)
}
}
}
#[cfg(feature="wide-mul")]
impl_wide_operators!($width,$width);
};
}
//const generics sidestepped wahoo
macro_repeated!(
impl_wide_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
(1,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
(1,3),(2,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
(1,4),(2,4),(3,4), (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
(1,5),(2,5),(3,5),(4,5), (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
(1,6),(2,6),(3,6),(4,6),(5,6), (7,6),(8,6),(9,6),(10,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7), (8,7),(9,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8), (9,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),
(1,11),(2,11),(3,11),(4,11),(5,11),
(1,12),(2,12),(3,12),(4,12),
(1,13),(2,13),(3,13),
(1,14),(2,14),
(1,15)
);
macro_repeated!(
impl_wide_same_size_not_const_generic,(),
1,2,3,4,5,6,7,8
);
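// Illustrative sketch of the width-doubling methods generated above: the product of two
// one-word values fits exactly in two words, so ONE*ONE stays exactly ONE.
#[cfg(test)]
mod wide_mul_examples{
use super::*;
#[test]
fn wide_mul_and_div_are_exact(){
let one=Fixed::<1,32>::ONE;
let wide:Fixed<2,64>=one.wide_mul_1_1(one);
assert_eq!(wide,Fixed::<2,64>::ONE);
let ratio:Fixed<2,64>=one.wide_div_1_1(Fixed::<1,32>::TWO);
assert_eq!(ratio,Fixed::<2,64>::HALF);
}
}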
pub trait Fix<Out>{
fn fix(self)->Out;
}
macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
self
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
macro_repeated!(
impl_fix_rhs_lt_lhs_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13),
(15,14),(16,14),
(16,15)
);
macro_repeated!(
impl_fix_lhs_lt_rhs_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
);
macro_repeated!(
impl_fix_lhs_eq_rhs_not_const_generic,(),
(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
);
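// Illustrative sketch of the fix_* resizing methods generated above: they preserve the
// numeric value while changing the word count, so widening then narrowing round-trips.
#[cfg(test)]
mod fix_examples{
use super::*;
#[test]
fn fix_round_trips(){
let one=Fixed::<1,32>::ONE;
let wide:Fixed<2,64>=one.fix_2();
assert_eq!(wide,Fixed::<2,64>::ONE);
assert_eq!(wide.fix_1(),one);
}
}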
macro_rules! impl_not_const_generic{
($n:expr,$_2n:expr)=>{
impl Fixed<$n,{$n*32}>{
paste::item!{
#[inline]
pub fn sqrt_unchecked(self)->Self{
//1<<max_shift must be the minimum power of two which when squared is greater than self
//calculating max_shift:
//1. count "used" bits to the left of the decimal, not including the sign bit (so -1)
//2. divide by 2 via >>1 (sqrt-ish)
//3. add on fractional offset
//Voila
let used_bits=self.bits.bits() as i32-1-($n*32) as i32;
let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
let mut result=Self::ZERO;
//resize self to match the wide mul output
let wide_self=self.[<fix_ $_2n>]();
//descend down the bits and check if flipping each bit would push the square over the input value
for shift in (0..=max_shift).rev(){
let new_result={
let mut bits=result.to_bits().to_bits();
bits.set_bit(shift,true);
Self::from_bits(BInt::from_bits(bits))
};
if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
result=new_result;
}
}
result
}
}
#[inline]
pub fn sqrt(self)->Self{
if self<Self::ZERO{
panic!("Square root less than zero")
}else{
self.sqrt_unchecked()
}
}
#[inline]
pub fn sqrt_checked(self)->Option<Self>{
if self<Self::ZERO{
None
}else{
Some(self.sqrt_unchecked())
}
}
}
}
}
impl_not_const_generic!(1,2);
impl_not_const_generic!(2,4);
impl_not_const_generic!(3,6);
impl_not_const_generic!(4,8);
impl_not_const_generic!(5,10);
impl_not_const_generic!(6,12);
impl_not_const_generic!(7,14);
impl_not_const_generic!(8,16);
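// Illustrative sketch of sqrt: it returns the largest representable value whose square
// does not exceed the input, and sqrt_checked rejects negative inputs.
#[cfg(test)]
mod sqrt_examples{
use super::*;
#[test]
fn sqrt_of_four_is_two(){
let four=Fixed::<1,32>::from(4i64);
assert_eq!(four.sqrt(),Fixed::<1,32>::TWO);
assert_eq!(Fixed::<1,32>::NEG_ONE.sqrt_checked(),None);
}
}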