30 Commits

Author SHA1 Message Date
1e83366386 assert range on truncation 2024-09-27 12:27:31 -07:00
438d0ec6ec test zeroes 2024-09-26 18:08:33 -07:00
94e23b7f0f idk what I'm doing 2024-09-26 18:08:20 -07:00
e6cd239dcb fix zeroes 2024-09-26 15:26:18 -07:00
8d97ffba92 column major 2024-09-26 15:06:05 -07:00
e46f4fb900 save a copy in sqrt using epic bnum 0.12 feature (pulled by yours truly) 2024-09-26 15:06:05 -07:00
2b58204cb9 update bnum 2024-09-26 15:06:05 -07:00
b91f061797 implement same-size wide mul more efficiently 2024-09-25 09:46:56 -07:00
102ea607ab Ratio Parity trait 2024-09-23 11:30:35 -07:00
ba357ee99b efficient fixed mul 2024-09-21 15:42:29 -07:00
546a4aa8c7 test negative 2024-09-18 11:50:34 -07:00
94cd23fe4b add ratio tests 2024-09-18 10:44:18 -07:00
bc773f7d45 test fix better 2024-09-17 15:10:11 -07:00
934475b959 fix fix 2024-09-17 15:10:07 -07:00
665d528b87 remove debug from float builder 2024-09-17 14:48:21 -07:00
865d7a7886 float tests 2024-09-17 14:47:18 -07:00
031cd6e771 float builder (debug version) 2024-09-17 14:47:14 -07:00
6dbe96fca2 Fixed<1,_>::to_raw() 2024-09-16 15:48:52 -07:00
655a6da251 cheese extrapolate div 2024-09-16 15:02:43 -07:00
a100f182e1 Fix trait 2024-09-16 15:02:31 -07:00
0734122e75 ratio: ord methods 2024-09-16 11:46:05 -07:00
4e284311e1 this depends on that 2024-09-15 20:30:09 -07:00
0cd28e402e fixed: special case for convenience 2024-09-13 14:00:40 -07:00
260ed0fd5c ratio: PartialEq, Eq, PartialOrd, Ord 2024-09-13 14:00:13 -07:00
ec82745c6d matrix: from_rows 2024-09-12 12:16:58 -07:00
10e56fb0b9 default numba (use with care) 2024-09-12 12:16:58 -07:00
5646bd3b5a fixed width specific impls 2024-09-12 10:52:57 -07:00
6cb639317c const helpers 2024-09-11 15:15:06 -07:00
db5c37c2fb implement 'fix' function that changes the fixed point 2024-09-11 14:06:34 -07:00
a73a32f2ad Divide trait 2024-09-11 13:29:13 -07:00
17 changed files with 707 additions and 111 deletions

4
fixed_wide/Cargo.lock generated

@ -10,9 +10,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "bnum"
version = "0.11.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e31ea183f6ee62ac8b8a8cf7feddd766317adfb13ff469de57ce033efd6a790"
checksum = "50202def95bf36cb7d1d7a7962cea1c36a3f8ad42425e5d2b71d7acb8041b5b8"
[[package]]
name = "fixed_wide"

@ -10,7 +10,7 @@ wide-mul=[]
zeroes=["dep:arrayvec"]
[dependencies]
bnum = "0.11.0"
bnum = "0.12.0"
arrayvec = { version = "0.7.6", optional = true }
paste = "1.0.15"
ratio_ops = { path = "../ratio_ops", optional = true }

@ -1,6 +1,6 @@
use bnum::{BInt,cast::As};
#[derive(Clone,Copy,Debug,Hash)]
#[derive(Clone,Copy,Debug,Default,Hash)]
/// A fixed-point number for which multiply operations widen the bits in the output (when the wide-mul feature is enabled).
/// N is the number of u64s to use
/// F is the number of fractional bits (always N*32 lol)
@ -40,6 +40,22 @@ impl<const N:usize,const F:usize> Fixed<N,F>{
digits[N-1]|=(value&i64::MIN) as u64;
Self::from_bits(BInt::from_bits(bnum::BUint::from_digits(digits)))
}
#[inline]
pub const fn is_zero(self)->bool{
self.bits.is_zero()
}
#[inline]
pub const fn is_negative(self)->bool{
self.bits.is_negative()
}
#[inline]
pub const fn is_positive(self)->bool{
self.bits.is_positive()
}
#[inline]
pub const fn abs(self)->Self{
Self::from_bits(self.bits.abs())
}
}
impl<const F:usize> Fixed<1,F>{
/// My old code called this function everywhere so let's provide it
@ -47,6 +63,10 @@ impl<const F:usize> Fixed<1,F>{
pub const fn raw(value:i64)->Self{
Self::from_bits(BInt::from_bits(bnum::BUint::from_digit(value as u64)))
}
#[inline]
pub const fn to_raw(self)->i64{
self.to_bits().to_bits().digits()[0] as i64
}
}
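For orientation, a minimal sketch of the raw()/to_raw() round trip, assuming the I32F32 alias (Fixed<1,32>, i.e. one u64 of storage with 32 fractional bits) exported from fixed_wide::types and used by the tests further down:
#[test]
fn raw_round_trip_sketch(){
	use fixed_wide::types::I32F32;
	// 1.0 in Q32.32 is the bit pattern 1<<32
	let one=I32F32::raw(1i64<<32);
	assert_eq!(one,I32F32::from(1));
	assert_eq!(one.to_raw(),1i64<<32);
}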
impl<const N:usize,const F:usize,T> From<T> for Fixed<N,F>
@ -118,31 +138,56 @@ impl<const N:usize,const F:usize> std::iter::Sum for Fixed<N,F>{
}
}
const fn signed_shift(lhs:u64,rhs:i32)->u64{
if rhs.is_negative(){
lhs>>-rhs
}else{
lhs<<rhs
}
}
macro_rules! impl_into_float {
( $output: ty ) => {
( $output: ty, $unsigned:ty, $exponent_bits:expr, $mantissa_bits:expr ) => {
impl<const N:usize,const F:usize> Into<$output> for Fixed<N,F>{
#[inline]
fn into(self)->$output{
let mut total=0.0;
let bits=self.bits.to_bits();
let digits=bits.digits();
for (i,digit) in digits[0..N-1].iter().enumerate(){
// (i*64-F) as i32 will interpret the highest order bit as a sign bit but whatever
total+=(*digit as $output)*(2.0 as $output).powi((i*64-F) as i32);
const DIGIT_SHIFT:u32=6;//Log2[64]
// SBBB BBBB
// 1001 1110 0000 0000
let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
let unsigned=self.bits.unsigned_abs();
let most_significant_bit=unsigned.bits();
let exp=if unsigned.is_zero(){
0
}else{
let msb=most_significant_bit as $unsigned;
let _127=((1 as $unsigned)<<($exponent_bits-1))-1;
let msb_offset=msb+_127-1-F as $unsigned;
msb_offset<<($mantissa_bits-1)
};
let digits=unsigned.digits();
let digit_index=most_significant_bit>>DIGIT_SHIFT;
let digit=digits[digit_index as usize];
//How many bits does the mantissa take from this digit
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let mut unmasked_mant=signed_shift(digit,rest_of_mantissa) as $unsigned;
if 0<rest_of_mantissa&&digit_index!=0{
//take the next digit down and shove some of its bits onto the bottom of the mantissa
let digit=digits[digit_index as usize-1];
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let unmasked_mant2=signed_shift(digit,rest_of_mantissa) as $unsigned;
unmasked_mant|=unmasked_mant2;
}
//most significant digit holds the sign bit
//assume we are using a number with at least 1 digit...
total+=((*digits.last().unwrap() as i64).abs() as $output)*(2.0 as $output).powi(((N-1)*64-F) as i32);
if self.bits.is_negative(){
total=-total;
}
total
let mant=unmasked_mant&((1 as $unsigned)<<($mantissa_bits-1))-1;
let bits=sign|exp|mant;
<$output>::from_bits(bits)
}
}
}
}
impl_into_float!(f32);
impl_into_float!(f64);
impl_into_float!(f32,u32,8,24);
impl_into_float!(f64,u64,11,53);
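For a plainer view of the packing above, here is the same sign/exponent/mantissa construction for a single-u64 Q32.32 value written against primitive types only; this is a sketch of the idea rather than the macro output, and it truncates the mantissa exactly like the code above:
fn q32_32_to_f32(fixed:i64)->f32{
	if fixed==0{return 0.0;}
	let sign=if fixed<0{1u32<<31}else{0};
	let unsigned=fixed.unsigned_abs();
	// 1-based position of the most significant set bit
	let msb=64-unsigned.leading_zeros();
	// the value is unsigned*2^-32, so its binary exponent is (msb-1)-32
	let exp=((msb as i32-1-32)+127) as u32;
	// align the top 24 bits (implicit 1 + 23 stored mantissa bits) under the msb
	let mant=if 24<=msb{(unsigned>>(msb-24)) as u32}else{(unsigned<<(24-msb)) as u32};
	f32::from_bits(sign|exp<<23|(mant&((1u32<<23)-1)))
}
// q32_32_to_f32(1i64<<30)==0.25, the same value exercised by the to_f32 test below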
impl<const N:usize,const F:usize> core::fmt::Display for Fixed<N,F>{
#[inline]
@ -250,15 +295,27 @@ macro_rules! impl_multiply_operator_not_const_generic {
impl<const F:usize> $struct<$width,F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self, other: Self) -> Self {
let lhs=self.bits.as_::<BInt::<{$width*2}>>();
let rhs=other.bits.as_::<BInt::<{$width*2}>>();
Self::from_bits(lhs.mul(rhs).shr(F as u32).as_())
pub fn [<fixed_ $method>](self, rhs: Self) -> Self {
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Self::from_bits(out.shr(F as u32).as_())
}else{
-Self::from_bits(out.shr(F as u32).as_())
}
}
}
}
#[cfg(not(feature="wide-mul"))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<i64> for Fixed<$width,{$width*32}>{
type Output=Self;
#[inline]
fn divide(self, other: i64)->Self::Output{
Self::from_bits(self.bits/BInt::from(other))
}
}
}
}
macro_rules! impl_divide_operator_not_const_generic {
@ -276,6 +333,16 @@ macro_rules! impl_divide_operator_not_const_generic {
}
#[cfg(all(not(feature="wide-mul"),not(feature="deferred-division")))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(all(not(feature="wide-mul"),feature="deferred-division"))]
impl<const F:usize> ratio_ops::ratio::Divide for $struct<$width,F>{
type Output = $output;
#[inline]
fn divide(self, other: Self) -> Self::Output {
paste::item!{
self.[<fixed_ $method>](other)
}
}
}
};
}
@ -341,7 +408,12 @@ impl<const LHS_N:usize,const LHS_F:usize,const RHS_N:usize,const RHS_F:usize> co
ratio_ops::ratio::Ratio::new(self,other)
}
}
#[cfg(feature="deferred-division")]
impl<const N:usize,const F:usize> ratio_ops::ratio::Parity for Fixed<N,F>{
fn parity(&self)->bool{
self.is_negative()
}
}
macro_rules! impl_shift_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
@ -370,6 +442,7 @@ impl_shift_operator!( Fixed, Shr, shr, Self );
// wide operators. The result width is the sum of the input widths, i.e. none of the multiplications can overflow.
#[allow(unused_macros)]
macro_rules! impl_wide_operators{
($lhs:expr,$rhs:expr)=>{
impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
@ -391,6 +464,16 @@ macro_rules! impl_wide_operators{
}
}
}
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn divide(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
}
}
@ -427,18 +510,54 @@ macro_rules! impl_wide_not_const_generic{
impl_wide_operators!($lhs,$rhs);
};
}
macro_rules! impl_wide_same_size_not_const_generic{
(
(),
$width:expr
)=>{
impl Fixed<$width,{$width*32}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Fixed::from_bits(out)
}else{
// Normal neg is the cheapest negation operation,
// and the inputs cannot reach the point (MIN) where it would matter
Fixed::from_bits(out.neg())
}
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$width*2}>>().shl($width*64);
let rhs=rhs.bits.as_::<BInt<{$width*2}>>();
Fixed::from_bits(lhs/rhs)
}
}
}
#[cfg(feature="wide-mul")]
impl_wide_operators!($width,$width);
};
}
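The shl($width*64) in wide_div is what keeps the quotient correctly scaled; a minimal sketch of the $width=1 case with i128 standing in for bnum:
// Q32.32 raw inputs, Q64.64 raw output:
// ((lhs*2^32)<<64)/(rhs*2^32) = (lhs/rhs)*2^64
fn wide_div_1_1_sketch(lhs_raw:i64,rhs_raw:i64)->i128{
	((lhs_raw as i128)<<64)/(rhs_raw as i128)
}
// wide_div_1_1_sketch(1i64<<32,2i64<<32)==1i128<<63, i.e. 0.5 in Q64.64
// wide_mul needs no shift: the raw product of two Q32.32 values is already Q64.64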
//const generics sidestepped wahoo
macro_repeated!(
impl_wide_not_const_generic,(),
(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
(1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
(1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
(1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
(1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
(1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
(1,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
(1,3),(2,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
(1,4),(2,4),(3,4), (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
(1,5),(2,5),(3,5),(4,5), (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
(1,6),(2,6),(3,6),(4,6),(5,6), (7,6),(8,6),(9,6),(10,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7), (8,7),(9,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8), (9,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),
(1,11),(2,11),(3,11),(4,11),(5,11),
@ -447,21 +566,133 @@ macro_repeated!(
(1,14),(2,14),
(1,15)
);
impl<const SRC:usize,const F:usize> Fixed<SRC,F>{
#[inline]
pub fn resize_into<const DST:usize>(self)->Fixed<DST,F>{
Fixed::from_bits(self.bits.as_::<BInt<DST>>())
macro_repeated!(
impl_wide_same_size_not_const_generic,(),
1,2,3,4,5,6,7,8
);
pub trait Fix<Out>{
fn fix(self)->Out;
}
macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
let max=bnum::cast::As::as_::<BInt::<$lhs>>(BInt::<$rhs>::MAX).shl(($lhs-$rhs)*32);
let min=bnum::cast::As::as_::<BInt::<$lhs>>(BInt::<$rhs>::MIN).shl(($lhs-$rhs)*32);
assert!(min<self.bits&&self.bits<max,"Truncation detected");
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
self
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
macro_repeated!(
impl_fix_rhs_lt_lhs_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13),
(15,14),(16,14),
(16,15)
);
macro_repeated!(
impl_fix_lhs_lt_rhs_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
);
macro_repeated!(
impl_fix_lhs_eq_rhs_not_const_generic,(),
(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
);
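Numerically, the generated fix_* methods only rescale the raw bits by powers of 2^32, with the new range assert when narrowing; a sketch of the 2→1 and 1→2 cases with i128/i64 standing in for bnum:
fn fix_1_sketch(raw_q64_64:i128)->i64{
	// mirrors the "Truncation detected" assert: the value must fit in Q32.32
	let max=(i64::MAX as i128)<<32;
	let min=(i64::MIN as i128)<<32;
	assert!(min<raw_q64_64&&raw_q64_64<max,"Truncation detected");
	(raw_q64_64>>32) as i64
}
fn fix_2_sketch(raw_q32_32:i64)->i128{
	// widening only appends 32 zero fractional bits
	(raw_q32_32 as i128)<<32
}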
macro_rules! impl_not_const_generic{
($n:expr)=>{
impl Fixed<{$n*2},{$n*2*32}>{
#[inline]
pub fn halve_precision(self)->Fixed<$n,{$n*32}>{
Fixed::from_bits(bnum::cast::As::as_(self.bits.shr($n*32)))
}
}
($n:expr,$_2n:expr)=>{
impl Fixed<$n,{$n*32}>{
paste::item!{
#[inline]
@ -476,13 +707,16 @@ macro_rules! impl_not_const_generic{
let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
let mut result=Self::ZERO;
//multiply by one to make the types match (hack)
//TODO: use resize method
let wide_self:<Self as core::ops::Mul>::Output=self*Self::ONE;
//resize self to match the wide mul output
let wide_self=self.[<fix_ $_2n>]();
//descend down the bits and check if flipping each bit would push the square over the input value
for shift in (0..=max_shift).rev(){
let new_result=result|Self::from_bits(BInt::from_bits(bnum::BUint::power_of_two(shift)));
if new_result*new_result<=wide_self{
let new_result={
let mut bits=result.to_bits().to_bits();
bits.set_bit(shift,true);
Self::from_bits(BInt::from_bits(bits))
};
if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
result=new_result;
}
}
@ -508,11 +742,11 @@ macro_rules! impl_not_const_generic{
}
}
}
impl_not_const_generic!(1);
impl_not_const_generic!(2);
impl_not_const_generic!(3);
impl_not_const_generic!(4);
impl_not_const_generic!(5);
impl_not_const_generic!(6);
impl_not_const_generic!(7);
impl_not_const_generic!(8);
impl_not_const_generic!(1,2);
impl_not_const_generic!(2,4);
impl_not_const_generic!(3,6);
impl_not_const_generic!(4,8);
impl_not_const_generic!(5,10);
impl_not_const_generic!(6,12);
impl_not_const_generic!(7,14);
impl_not_const_generic!(8,16);
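The sqrt loop above is a plain binary search over the output bits, now squaring through the explicit wide_mul so the comparison cannot overflow; the same scheme on a bare u64, without the fixed-point exponent offset, looks like this:
fn isqrt_u64(n:u64)->u64{
	let mut result=0u64;
	// the highest bit worth trying sits at half of n's bit length
	let max_shift=(64-n.leading_zeros())/2;
	for shift in (0..=max_shift).rev(){
		let candidate=result|(1u64<<shift);
		// keep the bit only if the square still fits under the input
		if (candidate as u128)*(candidate as u128)<=n as u128{
			result=candidate;
		}
	}
	result
}
// isqrt_u64(4)==2, isqrt_u64(10)==3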

@ -4,13 +4,43 @@ use crate::types::I256F256;
#[test]
fn you_can_add_numbers(){
let a=I256F256::from((3i128*2).pow(4));
assert_eq!(a+a,I256F256::from((3i128*2).pow(4)*2))
assert_eq!(a+a,I256F256::from((3i128*2).pow(4)*2));
}
#[test]
fn to_f32(){
let a=I256F256::from(1)>>2;
let f:f32=a.into();
assert_eq!(f,0.25f32);
let f:f32=(-a).into();
assert_eq!(f,-0.25f32);
let a=I256F256::from(0);
let f:f32=(-a).into();
assert_eq!(f,0f32);
let a=I256F256::from(237946589723468975i64)<<32;
let f:f32=a.into();
assert_eq!(f,237946589723468975f32*2.0f32.powi(32));
}
#[test]
fn to_f64(){
let a=I256F256::from(1)>>2;
let f:f64=a.into();
assert_eq!(f,0.25f64);
let f:f64=(-a).into();
assert_eq!(f,-0.25f64);
let a=I256F256::from(0);
let f:f64=(-a).into();
assert_eq!(f,0f64);
let a=I256F256::from(237946589723468975i64)<<32;
let f:f64=a.into();
assert_eq!(f,237946589723468975f64*2.0f64.powi(32));
}
#[test]
fn you_can_shr_numbers(){
let a=I32F32::from(4);
assert_eq!(a>>1,I32F32::from(2))
assert_eq!(a>>1,I32F32::from(2));
}
#[test]
@ -52,6 +82,13 @@ fn test_bint(){
assert_eq!(a*2,I32F32::from(2));
}
#[test]
fn test_fix(){
assert_eq!(I32F32::ONE.fix_8(),I256F256::ONE);
assert_eq!(I32F32::ONE,I256F256::ONE.fix_1());
assert_eq!(I32F32::NEG_ONE.fix_8(),I256F256::NEG_ONE);
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.fix_1());
}
#[test]
fn test_sqrt(){
let a=I32F32::ONE*4;
@ -102,3 +139,27 @@ fn test_sqrt_max(){
let a=I32F32::MAX;
test_exact(a);
}
#[test]
#[cfg(all(feature="zeroes",not(feature="deferred-division")))]
fn test_zeroes_normal(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE,I32F32::ONE]));
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE*3,I32F32::ONE*2,I32F32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE*3,I32F32::ONE]));
}
#[test]
#[cfg(all(feature="zeroes",feature="deferred-division"))]
fn test_zeroes_deferred_division(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
assert_eq!(
zeroes,
arrayvec::ArrayVec::from_iter([
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::NEG_ONE*2),
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::ONE*2),
])
);
}

@ -10,15 +10,14 @@ macro_rules! impl_zeroes{
let a2pos=match a2.cmp(&Self::ZERO){
Ordering::Greater=>true,
Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1).into_iter()),
Ordering::Less=>true,
Ordering::Less=>false,
};
paste::item!{
let radicand=a1*a1-a2*a0*4;
}
match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
Ordering::Greater=>{
//TODO: use resize method
let planar_radicand:Self=radicand.sqrt().halve_precision();
paste::item!{
let planar_radicand=radicand.sqrt().[<fix_ $n>]();
}
//sort roots ascending and avoid taking the difference of large numbers
let zeroes=match (a2pos,Self::ZERO<a1){
(true, true )=>[(-a1-planar_radicand)/(a2*2),(a0*2)/(-a1-planar_radicand)],
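The match above pairs each root with whichever of the two equivalent formulas avoids cancellation; the same selection for the a2>0 case, sketched in plain f64:
fn stable_roots(a0:f64,a1:f64,a2:f64)->[f64;2]{
	// roots of a2*x^2 + a1*x + a0, assuming a2>0 and a positive discriminant,
	// returned ascending like the fixed-point version
	let r=(a1*a1-4.0*a2*a0).sqrt();
	if 0.0<a1{
		[(-a1-r)/(2.0*a2),(2.0*a0)/(-a1-r)]
	}else{
		[(2.0*a0)/(-a1+r),(-a1+r)/(2.0*a2)]
	}
}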

5
linear_ops/Cargo.lock generated

@ -4,9 +4,9 @@ version = 3
[[package]]
name = "bnum"
version = "0.11.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e31ea183f6ee62ac8b8a8cf7feddd766317adfb13ff469de57ce033efd6a790"
checksum = "50202def95bf36cb7d1d7a7962cea1c36a3f8ad42425e5d2b71d7acb8041b5b8"
[[package]]
name = "fixed_wide"
@ -21,6 +21,7 @@ name = "linear_ops"
version = "0.1.0"
dependencies = [
"fixed_wide",
"paste",
"ratio_ops",
]

@ -4,12 +4,15 @@ version = "0.1.0"
edition = "2021"
[features]
default=["named-fields"]
default=["named-fields","fixed-wide"]
named-fields=[]
fixed-wide=["dep:fixed_wide","dep:paste"]
deferred-division=["dep:ratio_ops"]
[dependencies]
ratio_ops = { path = "../ratio_ops", optional = true }
fixed_wide = { path = "../fixed_wide", optional = true }
paste = { version = "1.0.15", optional = true }
[dev-dependencies]
fixed_wide = { version = "0.1.0", path = "../fixed_wide", features = ["wide-mul"] }
fixed_wide = { path = "../fixed_wide", features = ["wide-mul"] }

@ -0,0 +1,79 @@
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fixed_wide_vector_not_const_generic {
(
(),
$n:expr
) => {
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$n,{$n*32}>>{
#[inline]
pub fn length(self)-><fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output{
self.length_squared().sqrt_unchecked()
}
#[inline]
pub fn with_length<U,V>(self,length:U)-><Vector<N,V> as core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>>::Output
where
fixed_wide::fixed::Fixed<$n,{$n*32}>:core::ops::Mul<U,Output=V>,
U:Copy,
V:core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>,
{
self*length/self.length()
}
}
};
}
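With the wide-mul feature the Mul output of Fixed<1,32> is Fixed<2,64>, so length() comes back at the widened precision; a usage sketch assuming the Vector/I32F32/I64F64 types used elsewhere in these crates:
use linear_ops::vector::Vector;
use fixed_wide::types::{I32F32,I64F64};
fn length_sketch(v:Vector<3,I32F32>)->I64F64{
	// length_squared() widens each product to I64F64; sqrt keeps that width
	v.length()
}
// length_sketch(Vector::new([I32F32::from(1),I32F32::from(2),I32F32::from(2)]))==I64F64::from(3)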
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! macro_4 {
( $macro: ident, $any:tt ) => {
$crate::macro_repeated!($macro,$any,1,2,3,4);
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fixed_wide_vector {
() => {
$crate::macro_4!(impl_fixed_wide_vector_not_const_generic,());
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
$crate::macro_repeated!(
impl_fix_not_const_generic,(),
(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
(1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),(9,9),(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),(10,10),(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),(11,11),(12,11),(13,11),(14,11),(15,11),(16,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),(12,12),(13,12),(14,12),(15,12),(16,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),(13,13),(14,13),(15,13),(16,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),(14,14),(15,14),(16,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),(15,15),(16,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),(16,16)
);
};
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fix_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
self.map(|t|t.[<fix_ $rhs>]())
}
}
}
}
}

@ -4,14 +4,21 @@ macro_rules! impl_matrix {
() => {
impl<const X:usize,const Y:usize,T> Matrix<X,Y,T>{
#[inline(always)]
pub const fn new(array:[[T;X];Y])->Self{
pub const fn new(array:[[T;Y];X])->Self{
Self{array}
}
#[inline(always)]
pub fn to_array(self)->[[T;X];Y]{
pub fn to_array(self)->[[T;Y];X]{
self.array
}
#[inline]
pub fn from_cols(cols:[Vector<Y,T>;X])->Self
{
Matrix::new(
cols.map(|col|col.array),
)
}
#[inline]
pub fn map<F,U>(self,f:F)->Matrix<X,Y,U>
where
F:Fn(T)->U
@ -33,38 +40,45 @@ macro_rules! impl_matrix {
)
}
#[inline]
// MatY<VecX>.MatX<VecZ> = MatY<VecZ>
// old (list of rows) MatY<VecX>.MatX<VecZ> = MatY<VecZ>
// new (list of columns) MatX<VecY>.MatZ<VecX> = MatZ<VecY>
pub fn dot<const Z:usize,U,V>(self,rhs:Matrix<Z,X,U>)->Matrix<Z,Y,V>
where
T:core::ops::Mul<U,Output=V>+Copy,
V:core::iter::Sum,
U:Copy,
{
let mut array_of_iterators=rhs.array.map(|axis|axis.into_iter().cycle());
Matrix::new(
self.array.map(|axis|
let mut array_of_iterators=self.array.map(|axis|axis.into_iter().cycle());
Matrix{
array:rhs.array.map(|rhs_axis|
core::array::from_fn(|_|
// axis dot product with transposed rhs array
axis.iter().zip(
array_of_iterators.iter_mut()
).map(|(&lhs_value,rhs_iter)|
lhs_value*rhs_iter.next().unwrap()
).sum()
array_of_iterators
.iter_mut()
.zip(rhs_axis.iter())
.map(|(lhs_iter,&rhs_value)|
lhs_iter.next().unwrap()*rhs_value
).sum()
)
)
)
}
}
#[inline]
// MatY<VecX>.VecX = VecY
// MatX<VecY>.VecY = VecX
pub fn transform_vector<U,V>(self,rhs:Vector<X,U>)->Vector<Y,V>
where
T:core::ops::Mul<U,Output=V>,
V:core::iter::Sum,
U:Copy,
{
let mut array_of_iterators=self.array.map(|axis|axis.into_iter());
Vector::new(
self.array.map(|axis|
Vector::new(axis).dot(rhs)
core::array::from_fn(|_|
array_of_iterators
.iter_mut()
.zip(rhs.array.iter())
.map(|(lhs_iter,&rhs_value)|
lhs_iter.next().unwrap()*rhs_value
).sum()
)
)
}
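To keep the new orientation straight: Matrix<X,Y,T> now stores X columns of height Y, so the element at row r of column c is array[c][r] and from_cols is the identity on storage. A small sketch:
#[test]
fn column_major_sketch(){
	// a 2-row, 3-column matrix built from its columns:
	// | 1 2 3 |
	// | 4 5 6 |
	let m=Matrix::<3,2,i32>::from_cols([Vector::new([1,4]),Vector::new([2,5]),Vector::new([3,6])]);
	assert_eq!(m.to_array(),[[1,4],[2,5],[3,6]]);
}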
@ -75,7 +89,7 @@ macro_rules! impl_matrix {
{
#[inline(always)]
pub const fn from_value(value:T)->Self{
Self::new([[value;X];Y])
Self::new([[value;Y];X])
}
}
@ -91,13 +105,13 @@ macro_rules! impl_matrix {
impl<const X:usize,const Y:usize,T:core::fmt::Display> core::fmt::Display for Matrix<X,Y,T>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
for row in &self.array[0..Y]{
for col in &self.array[0..X]{
core::write!(f,"\n")?;
for elem in &row[0..X-1]{
for elem in &col[0..Y-1]{
core::write!(f,"{}, ",elem)?;
}
// assume we will be using matrices of size 1x1 or greater
core::write!(f,"{}",row.last().unwrap())?;
core::write!(f,"{}",col.last().unwrap())?;
}
Ok(())
}
@ -136,6 +150,13 @@ macro_rules! impl_matrix {
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_deferred_division {
() => {
impl<const X:usize,const Y:usize,T:ratio_ops::ratio::Divide<U,Output=V>,U:Copy,V> ratio_ops::ratio::Divide<U> for Matrix<X,Y,T>{
type Output=Matrix<X,Y,V>;
#[inline]
fn divide(self,rhs:U)->Self::Output{
self.map(|t|t.divide(rhs))
}
}
impl<const X:usize,const Y:usize,T,U> core::ops::Div<U> for Matrix<X,Y,T>{
type Output=ratio_ops::ratio::Ratio<Matrix<X,Y,T>,U>;
#[inline]
@ -152,14 +173,14 @@ macro_rules! impl_matrix_extend {
( $x: expr, $y: expr ) => {
impl<T> Matrix<$x,$y,T>{
#[inline]
pub fn extend_row(self,value:Vector<$x,T>)->Matrix<$x,{$y+1},T>{
pub fn extend_column(self,value:Vector<$y,T>)->Matrix<{$x+1},$y,T>{
let mut iter=self.array.into_iter().chain(core::iter::once(value.array));
Matrix::new(
core::array::from_fn(|_|iter.next().unwrap()),
)
}
#[inline]
pub fn extend_column(self,value:Vector<$y,T>)->Matrix<{$x+1},$y,T>{
pub fn extend_row(self,value:Vector<$x,T>)->Matrix<$x,{$y+1},T>{
let mut iter_rows=value.array.into_iter();
Matrix::new(
self.array.map(|axis|{

@ -2,6 +2,9 @@ pub mod common;
pub mod vector;
pub mod matrix;
#[cfg(feature="fixed-wide")]
pub mod fixed_wide;
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! macro_repeated{

@ -164,12 +164,23 @@ macro_rules! impl_vector {
$crate::impl_vector_shift_operator!(Shl, shl);
$crate::impl_vector_shift_assign_operator!(ShrAssign, shr_assign);
$crate::impl_vector_shift_operator!(Shr, shr);
// dedicated methods for this type
#[cfg(feature="fixed-wide")]
$crate::impl_fixed_wide_vector!();
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_deferred_division {
() => {
impl<const N:usize,T:ratio_ops::ratio::Divide<U,Output=V>,U:Copy,V> ratio_ops::ratio::Divide<U> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn divide(self,rhs:U)->Self::Output{
self.map(|t|t.divide(rhs))
}
}
impl<const N:usize,T,U> core::ops::Div<U> for Vector<N,T>{
type Output=ratio_ops::ratio::Ratio<Vector<N,T>,U>;
#[inline]

@ -2,7 +2,7 @@ use crate::vector::Vector;
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct Matrix<const X:usize,const Y:usize,T>{
pub(crate) array:[[T;X];Y],
pub(crate) array:[[T;Y];X],
}
crate::impl_matrix!();

@ -1,4 +1,4 @@
use crate::types::{Matrix3,Matrix2x3,Matrix4x3,Matrix2x4,Vector3};
use crate::types::{Matrix3,Matrix3x2,Matrix3x4,Matrix4x2,Vector3};
type Planar64=fixed_wide::types::I32F32;
type Planar64Wide1=fixed_wide::types::I64F64;
@ -37,28 +37,28 @@ fn wide_vec3_length_squared(){
#[test]
fn wide_matrix_dot(){
let lhs=Matrix4x3::new([
let lhs=Matrix3x4::new([
[Planar64::from(1),Planar64::from(2),Planar64::from(3),Planar64::from(4)],
[Planar64::from(5),Planar64::from(6),Planar64::from(7),Planar64::from(8)],
[Planar64::from(9),Planar64::from(10),Planar64::from(11),Planar64::from(12)],
]);
let rhs=Matrix2x4::new([
]).transpose();
let rhs=Matrix4x2::new([
[Planar64::from(1),Planar64::from(2)],
[Planar64::from(3),Planar64::from(4)],
[Planar64::from(5),Planar64::from(6)],
[Planar64::from(7),Planar64::from(8)],
]);
]).transpose();
// Mat3<Vec4>.dot(Mat4<Vec2>) -> Mat3<Vec2>
let m_dot=lhs*rhs;
//In[1]:= {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}} . {{1, 2}, {3, 4}, {5, 6}, {7, 8}}
//Out[1]= {{50, 60}, {114, 140}, {178, 220}}
assert_eq!(
m_dot.array,
Matrix2x3::new([
Matrix3x2::new([
[Planar64Wide1::from(50),Planar64Wide1::from(60)],
[Planar64Wide1::from(114),Planar64Wide1::from(140)],
[Planar64Wide1::from(178),Planar64Wide1::from(220)],
]).array
]).transpose().array
);
}

@ -1,4 +1,4 @@
use crate::types::{Vector2,Vector3,Matrix4x3,Matrix2x4,Matrix2x3,Matrix3x2};
use crate::types::{Vector2,Vector3,Matrix3x4,Matrix4x2,Matrix3x2,Matrix2x3};
#[test]
fn test_bool(){
@ -21,10 +21,10 @@ fn test_arithmetic(){
#[test]
fn matrix_transform_vector(){
let m=Matrix3x2::new([
let m=Matrix2x3::new([
[1,2,3],
[4,5,6],
]);
]).transpose();
let v=Vector3::new([1,2,3]);
let transformed=m*v;
assert_eq!(transformed.array,Vector2::new([14,32]).array);
@ -32,28 +32,28 @@ fn matrix_transform_vector(){
#[test]
fn matrix_dot(){
let rhs=Matrix2x4::new([
// All this code was written row major and I converted the lib to column major
let rhs=Matrix4x2::new([
[ 1.0, 2.0],
[ 3.0, 4.0],
[ 5.0, 6.0],
[ 7.0, 8.0],
]); // | | |
let lhs=Matrix4x3::new([ // | | |
]).transpose(); // | | |
let lhs=Matrix3x4::new([ // | | |
[1.0, 2.0, 3.0, 4.0],// [ 50.0, 60.0],
[5.0, 6.0, 7.0, 8.0],// [114.0,140.0],
[9.0,10.0,11.0,12.0],// [178.0,220.0],
]);
]).transpose();
// Mat3<Vec4>.dot(Mat4<Vec2>) -> Mat3<Vec2>
let m_dot=lhs*rhs;
//In[1]:= {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}} . {{1, 2}, {3, 4}, {5, 6}, {7, 8}}
//Out[1]= {{50, 60}, {114, 140}, {178, 220}}
assert_eq!(
m_dot.array,
Matrix2x3::new([
Matrix3x2::new([
[50.0,60.0],
[114.0,140.0],
[178.0,220.0],
]).array
]).transpose().array
);
}

@ -1 +1,4 @@
pub mod ratio;
#[cfg(test)]
mod tests;

@ -10,13 +10,19 @@ impl<Num,Den> Ratio<Num,Den>{
}
}
/// The actual divide implementation, Div is replaced with a Ratio constructor
pub trait Divide<Rhs=Self>{
type Output;
fn divide(self,rhs:Rhs)->Self::Output;
}
impl<Num,Den> Ratio<Num,Den>
where
Num:core::ops::Div<Den>,
Num:Divide<Den>,
{
#[inline]
pub fn divide(num:Num,den:Den)-><Num as core::ops::Div<Den>>::Output{
num/den
pub fn divide(self)-><Num as Divide<Den>>::Output{
self.num.divide(self.den)
}
}
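Under deferred-division, the / operator only packages its operands into a Ratio; the division itself happens later through this trait. Roughly, with a local stand-in type:
use ratio_ops::ratio::{Ratio,Divide};
struct MyNum(i64);
impl Divide for MyNum{
	type Output=i64;
	fn divide(self,rhs:Self)->i64{self.0/rhs.0}
}
// `a/b` on the fixed/vector/matrix types builds Ratio::new(a,b);
// calling .divide() on the ratio performs the deferred division:
fn resolve(r:Ratio<MyNum,MyNum>)->i64{r.divide()}
// resolve(Ratio::new(MyNum(6),MyNum(3)))==2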
@ -62,6 +68,78 @@ impl_ratio_method!(Add,add,add_ratio);
impl_ratio_method!(Sub,sub,sub_ratio);
impl_ratio_method!(Rem,rem,rem_ratio);
/// Comparing two ratios needs to know the parity of the denominators
/// For signed integers this can be implemented with is_negative()
pub trait Parity{
fn parity(&self)->bool;
}
macro_rules! impl_parity_unsigned{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
false
}
}
)*
};
}
macro_rules! impl_parity_signed{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
self.is_negative()
}
}
)*
};
}
macro_rules! impl_parity_float{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
self.is_sign_negative()
}
}
)*
};
}
impl_parity_unsigned!(u8,u16,u32,u64,u128,usize);
impl_parity_signed!(i8,i16,i32,i64,i128,isize);
impl_parity_float!(f32,f64);
macro_rules! impl_ratio_ord_method{
($method:ident, $ratio_method:ident, $output:ty)=>{
impl<LhsNum,LhsDen:Parity> Ratio<LhsNum,LhsDen>{
#[inline]
pub fn $ratio_method<RhsNum,RhsDen:Parity,T>(self,rhs:Ratio<RhsNum,RhsDen>)->$output
where
LhsNum:core::ops::Mul<RhsDen,Output=T>,
LhsDen:core::ops::Mul<RhsNum,Output=T>,
T:Ord,
{
match self.den.parity()^rhs.den.parity(){
true=>(self.den*rhs.num).$method(&(self.num*rhs.den)),
false=>(self.num*rhs.den).$method(&(self.den*rhs.num)),
}
}
}
}
}
//PartialEq
impl_ratio_ord_method!(eq,eq_ratio,bool);
//PartialOrd
impl_ratio_ord_method!(lt,lt_ratio,bool);
impl_ratio_ord_method!(gt,gt_ratio,bool);
impl_ratio_ord_method!(le,le_ratio,bool);
impl_ratio_ord_method!(ge,ge_ratio,bool);
impl_ratio_ord_method!(partial_cmp,partial_cmp_ratio,Option<core::cmp::Ordering>);
//Ord
impl_ratio_ord_method!(cmp,cmp_ratio,core::cmp::Ordering);
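The parity xor implements the usual cross-multiplication rule: multiplying both sides of a comparison by a negative denominator flips it, so when exactly one denominator is negative the cross products are swapped. On plain integers:
fn ratio_lt(ln:i64,ld:i64,rn:i64,rd:i64)->bool{
	// compares ln/ld < rn/rd without dividing
	if (ld<0)^(rd<0){
		// exactly one negative denominator: swapped cross products
		ld*rn<ln*rd
	}else{
		ln*rd<ld*rn
	}
}
// ratio_lt(1,-2,3,4)==true   (-0.5 < 0.75)
// ratio_lt(-1,2,-3,4)==false (-0.5 < -0.75 is false)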
/* generic rhs mul is not possible!
impl<Lhs,RhsNum,RhsDen> core::ops::Mul<Ratio<RhsNum,RhsDen>> for Lhs
where
@ -169,3 +247,48 @@ macro_rules! impl_ratio_assign_operator {
impl_ratio_assign_operator!(AddAssign,add_assign);
impl_ratio_assign_operator!(SubAssign,sub_assign);
impl_ratio_assign_operator!(RemAssign,rem_assign);
// Only implement PartialEq<Self>
// Rust's operators aren't actually that good
impl<Num,Den,T> PartialEq for Ratio<Num,Den>
where
Num:Copy,
Den:Copy,
Num:core::ops::Mul<Den,Output=T>,
T:PartialEq,
{
#[inline]
fn eq(&self,&other:&Self)->bool{
(self.num*other.den).eq(&(other.num*self.den))
}
}
impl<Num,Den> Eq for Ratio<Num,Den>
where
Ratio<Num,Den>:PartialEq,
{}
impl<Num,Den,T> PartialOrd for Ratio<Num,Den>
where
Num:Copy,
Den:Copy,
Num:core::ops::Mul<Den,Output=T>,
T:PartialOrd,
{
#[inline]
fn partial_cmp(&self,&other:&Self)->Option<core::cmp::Ordering>{
(self.num*other.den).partial_cmp(&(other.num*self.den))
}
}
impl<Num,Den,T> Ord for Ratio<Num,Den>
where
Num:Copy,
Den:Copy,
Num:core::ops::Mul<Den,Output=T>,
T:Ord,
{
#[inline]
fn cmp(&self,other:&Self)->std::cmp::Ordering{
(self.num*other.den).cmp(&(other.num*self.den))
}
}

58
ratio_ops/src/tests.rs Normal file

@ -0,0 +1,58 @@
use crate::ratio::Ratio;
macro_rules! test_op{
($ratio_op:ident,$op:ident,$a:expr,$b:expr,$c:expr,$d:expr)=>{
assert_eq!(
Ratio::new($a,$b).$ratio_op(Ratio::new($c,$d)),
(($a as f32)/($b as f32)).$op(&(($c as f32)/($d as f32)))
);
};
}
macro_rules! test_many_ops{
($ratio_op:ident,$op:ident)=>{
test_op!($ratio_op,$op,1,2,3,4);
test_op!($ratio_op,$op,1,2,-3,4);
test_op!($ratio_op,$op,-1,2,-3,4);
test_op!($ratio_op,$op,-1,-2,-3,4);
test_op!($ratio_op,$op,2,1,6,3);
test_op!($ratio_op,$op,-2,1,6,3);
test_op!($ratio_op,$op,2,-1,-6,3);
test_op!($ratio_op,$op,2,1,6,-3);
};
}
#[test]
fn test_lt(){
test_many_ops!(lt_ratio,lt);
}
#[test]
fn test_gt(){
test_many_ops!(gt_ratio,gt);
}
#[test]
fn test_le(){
test_many_ops!(le_ratio,le);
}
#[test]
fn test_ge(){
test_many_ops!(ge_ratio,ge);
}
#[test]
fn test_eq(){
test_many_ops!(eq_ratio,eq);
}
#[test]
fn test_partial_cmp(){
test_many_ops!(partial_cmp_ratio,partial_cmp);
}
// #[test]
// fn test_cmp(){
// test_many_ops!(cmp_ratio,cmp);
// }