diff --git a/lib/README.md b/lib/README.md
new file mode 100644
index 0000000..51cbf8c
--- /dev/null
+++ b/lib/README.md
@@ -0,0 +1,25 @@
+Vectors: Fixed Size, Fixed Point, Wide
+======================================
+
+These exist separately in the Rust ecosystem, but not together.
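+
+A quick sketch of the intended usage (`I32F32`/`I64F64` are aliases from the
+`fixed_wide` crate below; `wide_mul_1_1` is its widening multiply):
+
+```rust
+use fixed_wide::types::{I32F32,I64F64};
+
+let x=I32F32::from(3);
+//the product widens instead of wrapping
+let y:I64F64=x.wide_mul_1_1(x);
+assert_eq!(y,I64F64::from(9));
+```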
+
+#### License
+
+Licensed under either of the Apache License, Version 2.0, or the MIT license, at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
+
diff --git a/lib/fixed_wide/.gitignore b/lib/fixed_wide/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/lib/fixed_wide/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/lib/fixed_wide/Cargo.lock b/lib/fixed_wide/Cargo.lock
new file mode 100644
index 0000000..695e4f8
--- /dev/null
+++ b/lib/fixed_wide/Cargo.lock
@@ -0,0 +1,35 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "arrayvec"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
+
+[[package]]
+name = "bnum"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50202def95bf36cb7d1d7a7962cea1c36a3f8ad42425e5d2b71d7acb8041b5b8"
+
+[[package]]
+name = "fixed_wide"
+version = "0.1.1"
+dependencies = [
+ "arrayvec",
+ "bnum",
+ "paste",
+ "ratio_ops",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
+name = "ratio_ops"
+version = "0.1.0"
diff --git a/lib/fixed_wide/Cargo.toml b/lib/fixed_wide/Cargo.toml
new file mode 100644
index 0000000..619d592
--- /dev/null
+++ b/lib/fixed_wide/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "fixed_wide"
+version = "0.1.1"
+edition = "2021"
+repository = "https://git.itzana.me/StrafesNET/fixed_wide_vectors"
+license = "MIT OR Apache-2.0"
+description = "Fixed point numbers with optional widening Mul operator."
+authors = ["Rhys Lloyd "]
+
+[features]
+default=[]
+deferred-division=["dep:ratio_ops"]
+wide-mul=[]
+zeroes=["dep:arrayvec"]
+
+[dependencies]
+bnum = "0.12.0"
+arrayvec = { version = "0.7.6", optional = true }
+paste = "1.0.15"
+ratio_ops = { version = "0.1.0", path = "../ratio_ops", registry = "strafesnet", optional = true }
diff --git a/lib/fixed_wide/LICENSE-APACHE b/lib/fixed_wide/LICENSE-APACHE
new file mode 100644
index 0000000..a7e77cb
--- /dev/null
+++ b/lib/fixed_wide/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
\ No newline at end of file
diff --git a/lib/fixed_wide/LICENSE-MIT b/lib/fixed_wide/LICENSE-MIT
new file mode 100644
index 0000000..468cd79
--- /dev/null
+++ b/lib/fixed_wide/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/lib/fixed_wide/src/fixed.rs b/lib/fixed_wide/src/fixed.rs
new file mode 100644
index 0000000..58aae16
--- /dev/null
+++ b/lib/fixed_wide/src/fixed.rs
@@ -0,0 +1,848 @@
+use bnum::{BInt,cast::As};
+
+#[derive(Clone,Copy,Debug,Default,Hash)]
+/// A Fixed point number for which multiply operations widen the bits in the output. (when the wide-mul feature is enabled)
+/// N is the number of u64s to use
+/// F is the number of fractional bits (always N*32 lol)
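+/// Concrete aliases live in types.rs: Fixed<1,32> is I32F32 (one 64-bit digit:
+/// 32 integer bits, 32 fractional bits), Fixed<2,64> is I64F64, and so on.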
+pub struct Fixed<const N:usize,const F:usize>{
+ pub(crate)bits:BInt<{N}>,
+}
+
+impl<const N:usize,const F:usize> Fixed<N,F>{
+ pub const MAX:Self=Self::from_bits(BInt::<N>::MAX);
+ pub const MIN:Self=Self::from_bits(BInt::<N>::MIN);
+ pub const ZERO:Self=Self::from_bits(BInt::<N>::ZERO);
+ pub const EPSILON:Self=Self::from_bits(BInt::<N>::ONE);
+ pub const NEG_EPSILON:Self=Self::from_bits(BInt::<N>::NEG_ONE);
+ pub const ONE:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32));
+ pub const TWO:Self=Self::from_bits(BInt::<N>::TWO.shl(F as u32));
+ pub const HALF:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32-1));
+ pub const NEG_ONE:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32));
+ pub const NEG_TWO:Self=Self::from_bits(BInt::<N>::NEG_TWO.shl(F as u32));
+ pub const NEG_HALF:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32-1));
+}
+impl<const N:usize,const F:usize> Fixed<N,F>{
+ #[inline]
+ pub const fn from_bits(bits:BInt::<N>)->Self{
+ Self{
+ bits,
+ }
+ }
+ #[inline]
+ pub const fn to_bits(self)->BInt<N>{
+ self.bits
+ }
+ #[inline]
+ pub const fn raw_digit(value:i64)->Self{
+ let mut digits=[0u64;N];
+ digits[0]=value.abs() as u64;
+ //sign bit
+ digits[N-1]|=(value&i64::MIN) as u64;
+ Self::from_bits(BInt::from_bits(bnum::BUint::from_digits(digits)))
+ }
+ #[inline]
+ pub const fn is_zero(self)->bool{
+ self.bits.is_zero()
+ }
+ #[inline]
+ pub const fn is_negative(self)->bool{
+ self.bits.is_negative()
+ }
+ #[inline]
+ pub const fn is_positive(self)->bool{
+ self.bits.is_positive()
+ }
+ #[inline]
+ pub const fn abs(self)->Self{
+ Self::from_bits(self.bits.abs())
+ }
+}
+impl<const F:usize> Fixed<1,F>{
+ /// My old code called this function everywhere so let's provide it
+ #[inline]
+ pub const fn raw(value:i64)->Self{
+ Self::from_bits(BInt::from_bits(bnum::BUint::from_digit(value as u64)))
+ }
+ #[inline]
+ pub const fn to_raw(self)->i64{
+ let &[digit]=self.to_bits().to_bits().digits();
+ digit as i64
+ }
+}
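+//e.g. I32F32::raw(1<<32) has the same bits as I32F32::ONE;
+//raw/to_raw just reinterpret the underlying i64 bit pattern.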
+
+macro_rules! impl_from {
+ ($($from:ty),*)=>{
+ $(
+ impl<const N:usize,const F:usize> From<$from> for Fixed<N,F>{
+ #[inline]
+ fn from(value:$from)->Self{
+ Self::from_bits(BInt::<{N}>::from(value)<<F as u32)
+ }
+ }
+ )*
+ };
+}
+impl_from!(
+ u8,u16,u32,
+ i8,i16,i32,i64
+);
+
+impl<const N:usize,const F:usize> PartialEq for Fixed<N,F>{
+ #[inline]
+ fn eq(&self,other:&Self)->bool{
+ self.bits.eq(&other.bits)
+ }
+}
+impl<const N:usize,const F:usize,T> PartialEq<T> for Fixed<N,F>
+where
+ T:Copy,
+ BInt::<N>:From<T>,
+{
+ #[inline]
+ fn eq(&self,&other:&T)->bool{
+ self.bits.eq(&other.into())
+ }
+}
+impl<const N:usize,const F:usize> Eq for Fixed<N,F>{}
+
+impl<const N:usize,const F:usize> PartialOrd for Fixed<N,F>{
+ #[inline]
+ fn partial_cmp(&self,other:&Self)->Option<std::cmp::Ordering>{
+ self.bits.partial_cmp(&other.bits)
+ }
+}
+impl<const N:usize,const F:usize,T> PartialOrd<T> for Fixed<N,F>
+ where
+ T:Copy,
+ BInt::<N>:From<T>,
+{
+ #[inline]
+ fn partial_cmp(&self,&other:&T)->Option<std::cmp::Ordering>{
+ self.bits.partial_cmp(&other.into())
+ }
+}
+impl<const N:usize,const F:usize> Ord for Fixed<N,F>{
+ #[inline]
+ fn cmp(&self,other:&Self)->std::cmp::Ordering{
+ self.bits.cmp(&other.bits)
+ }
+}
+
+impl<const N:usize,const F:usize> std::ops::Neg for Fixed<N,F>{
+ type Output=Self;
+ #[inline]
+ fn neg(self)->Self{
+ Self::from_bits(self.bits.neg())
+ }
+}
+impl<const N:usize,const F:usize> std::iter::Sum for Fixed<N,F>{
+ #[inline]
+ fn sum<I:Iterator<Item=Self>>(iter:I)->Self{
+ let mut sum=Self::ZERO;
+ for elem in iter{
+ sum+=elem;
+ }
+ sum
+ }
+}
+
+const fn signed_shift(lhs:u64,rhs:i32)->u64{
+ if rhs.is_negative(){
+ lhs>>-rhs
+ }else{
+ lhs<<rhs
+ }
+}
+macro_rules! impl_into_float {
+ ( $output:ty, $unsigned:ty, $exponent_bits:expr, $mantissa_bits:expr ) => {
+ impl<const N:usize,const F:usize> Into<$output> for Fixed<N,F>{
+ #[inline]
+ fn into(self)->$output{
+ const DIGIT_SHIFT:u32=6;//Log2[64]
+ // SBBB BBBB
+ // 1001 1110 0000 0000
+ let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
+ let unsigned=self.bits.unsigned_abs();
+ let most_significant_bit=unsigned.bits();
+ let exp=if unsigned.is_zero(){
+ 0
+ }else{
+ let msb=most_significant_bit as $unsigned;
+ let _127=((1 as $unsigned)<<($exponent_bits-1))-1;
+ let msb_offset=msb+_127-1-F as $unsigned;
+ msb_offset<<($mantissa_bits-1)
+ };
+ let digits=unsigned.digits();
+ let digit_index=most_significant_bit.saturating_sub(1)>>DIGIT_SHIFT;
+ let digit=digits[digit_index as usize];
+ //How many bits does the mantissa take from this digit
+ let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
+ let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
+ let mut mantissa=signed_shift(digit,rest_of_mantissa) as $unsigned;
+ //grab any remaining mantissa bits from the digit below
+ if 0<rest_of_mantissa&&digit_index!=0{
+ let digit=digits[digit_index as usize-1];
+ mantissa|=signed_shift(digit,rest_of_mantissa-64) as $unsigned;
+ }
+ //mask off the implicit leading bit
+ mantissa&=((1 as $unsigned)<<($mantissa_bits-1))-1;
+ let bits=sign|exp|mantissa;
+ <$output>::from_bits(bits)
+ }
+ }
+ }
+}
+impl_into_float!(f32,u32,8,24);
+impl_into_float!(f64,u64,11,53);
+
+#[inline]
+fn integer_decode_f32(f: f32) -> (u64, i16, bool) {
+ let bits: u32 = f.to_bits();
+ let sign: bool = bits & (1<<31) != 0;
+ let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0x7fffff) << 1
+ } else {
+ (bits & 0x7fffff) | 0x800000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 127 + 23;
+ (mantissa as u64, exponent, sign)
+}
+#[inline]
+fn integer_decode_f64(f: f64) -> (u64, i16, bool) {
+ let bits: u64 = f.to_bits();
+ let sign: bool = bits & (1u64<<63) != 0;
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+}
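+//e.g. integer_decode_f32(1.0)==(1<<23,-23,false): the decoded value is mantissa*2^exponent.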
+#[derive(Debug,Eq,PartialEq)]
+pub enum FixedFromFloatError{
+ Nan,
+ Infinite,
+ Overflow,
+ Underflow,
+}
+impl FixedFromFloatError{
+ pub fn underflow_to_zero<const N:usize,const F:usize>(self)->Result<Fixed<N,F>,Self>{
+ match self{
+ FixedFromFloatError::Underflow=>Ok(Fixed::ZERO),
+ _=>Err(self),
+ }
+ }
+}
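+//e.g. I32F32::try_from(f32::MIN_POSITIVE).or_else(FixedFromFloatError::underflow_to_zero)
+//collapses the Underflow error into Ok(I32F32::ZERO).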
+macro_rules! impl_from_float {
+ ( $decode:ident, $input: ty, $mantissa_bits:expr ) => {
+ impl<const N:usize,const F:usize> TryFrom<$input> for Fixed<N,F>{
+ type Error=FixedFromFloatError;
+ #[inline]
+ fn try_from(value:$input)->Result<Self,Self::Error>{
+ const DIGIT_SHIFT:u32=6;
+ match value.classify(){
+ std::num::FpCategory::Nan=>Err(FixedFromFloatError::Nan),
+ std::num::FpCategory::Infinite=>Err(FixedFromFloatError::Infinite),
+ std::num::FpCategory::Zero=>Ok(Self::ZERO),
+ std::num::FpCategory::Subnormal
+ |std::num::FpCategory::Normal
+ =>{
+ let (m,e,s)=$decode(value);
+ let mut digits=[0u64;N];
+ let most_significant_bit=e as i32+$mantissa_bits as i32+F as i32;
+ if most_significant_bit<0{
+ return Err(FixedFromFloatError::Underflow);
+ }
+ let digit_index=most_significant_bit>>DIGIT_SHIFT;
+ let digit=digits.get_mut(digit_index as usize).ok_or(FixedFromFloatError::Overflow)?;
+ let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
+ //align the mantissa's top bit with take_bits; low bits spill into the digit below
+ let shift=take_bits-$mantissa_bits as i32;
+ *digit=signed_shift(m,shift);
+ if shift<0&&digit_index!=0{
+ digits[digit_index as usize-1]=signed_shift(m,shift+64);
+ }
+ let bits=BInt::from_bits(bnum::BUint::from_digits(digits));
+ //a magnitude that lands on the sign bit cannot be represented (see the MIN test case)
+ if bits.is_negative(){
+ return Err(FixedFromFloatError::Overflow);
+ }
+ Ok(Self::from_bits(if s{-bits}else{bits}))
+ },
+ }
+ }
+ }
+ }
+}
+impl_from_float!(integer_decode_f32,f32,23);
+impl_from_float!(integer_decode_f64,f64,52);
+
+impl<const N:usize,const F:usize> core::fmt::Display for Fixed<N,F>{
+ #[inline]
+ fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
+ let float:f32=(*self).into();
+ core::write!(f,"{:.3}",float)
+ }
+}
+
+macro_rules! impl_additive_operator {
+ ( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
+ impl<const N:usize,const F:usize> $struct<N,F>{
+ #[inline]
+ pub const fn $method(self, other: Self) -> Self {
+ Self::from_bits(self.bits.$method(other.bits))
+ }
+ }
+ impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
+ type Output = $output;
+ #[inline]
+ fn $method(self, other: Self) -> Self::Output {
+ self.$method(other)
+ }
+ }
+ impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
+ where
+ BInt::<N>:From<U>,
+ {
+ type Output = $output;
+ #[inline]
+ fn $method(self, other: U) -> Self::Output {
+ Self::from_bits(self.bits.$method(BInt::<N>::from(other).shl(F as u32)))
+ }
+ }
+ };
+}
+macro_rules! impl_additive_assign_operator {
+ ( $struct: ident, $trait: ident, $method: ident ) => {
+ impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
+ #[inline]
+ fn $method(&mut self, other: Self) {
+ self.bits.$method(other.bits);
+ }
+ }
+ impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
+ where
+ BInt::<N>:From<U>,
+ {
+ #[inline]
+ fn $method(&mut self, other: U) {
+ self.bits.$method(BInt::<N>::from(other).shl(F as u32));
+ }
+ }
+ };
+}
+
+// Impl arithmetic operators
+impl_additive_assign_operator!( Fixed, AddAssign, add_assign );
+impl_additive_operator!( Fixed, Add, add, Self );
+impl_additive_assign_operator!( Fixed, SubAssign, sub_assign );
+impl_additive_operator!( Fixed, Sub, sub, Self );
+impl_additive_assign_operator!( Fixed, RemAssign, rem_assign );
+impl_additive_operator!( Fixed, Rem, rem, Self );
+
+// Impl bitwise operators
+impl_additive_assign_operator!( Fixed, BitAndAssign, bitand_assign );
+impl_additive_operator!( Fixed, BitAnd, bitand, Self );
+impl_additive_assign_operator!( Fixed, BitOrAssign, bitor_assign );
+impl_additive_operator!( Fixed, BitOr, bitor, Self );
+impl_additive_assign_operator!( Fixed, BitXorAssign, bitxor_assign );
+impl_additive_operator!( Fixed, BitXor, bitxor, Self );
+
+// non-wide operators. The result is the same width as the inputs.
+
+// This macro is not used in the default configuration.
+#[allow(unused_macros)]
+macro_rules! impl_multiplicative_operator_not_const_generic {
+ ( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
+ impl<const F:usize> core::ops::$trait for $struct<$width,F>{
+ type Output = $output;
+ #[inline]
+ fn $method(self, other: Self) -> Self::Output {
+ paste::item!{
+ self.[<fixed_ $method>](other)
+ }
+ }
+ }
+ };
+}
+macro_rules! impl_multiplicative_assign_operator_not_const_generic {
+ ( ($struct: ident, $trait: ident, $method: ident, $non_assign_method: ident ), $width:expr ) => {
+ impl<const F:usize> core::ops::$trait for $struct<$width,F>{
+ #[inline]
+ fn $method(&mut self, other: Self) {
+ paste::item!{
+ *self=self.[<fixed_ $non_assign_method>](other);
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_multiply_operator_not_const_generic {
+ ( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
+ impl<const F:usize> $struct<$width,F>{
+ paste::item!{
+ #[inline]
+ pub fn [<fixed_ $method>](self, rhs: Self) -> Self {
+ let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
+ let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
+ if self.is_negative()==rhs.is_negative(){
+ Self::from_bits(out.shr(F as u32).as_())
+ }else{
+ -Self::from_bits(out.shr(F as u32).as_())
+ }
+ }
+ }
+ }
+ #[cfg(not(feature="wide-mul"))]
+ impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
+ #[cfg(feature="deferred-division")]
+ impl ratio_ops::ratio::Divide<i64> for Fixed<$width,{$width*32}>{
+ type Output=Self;
+ #[inline]
+ fn divide(self, other: i64)->Self::Output{
+ Self::from_bits(self.bits.div_euclid(BInt::from(other)))
+ }
+ }
+ }
+}
+macro_rules! impl_divide_operator_not_const_generic {
+ ( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
+ impl<const F:usize> $struct<$width,F>{
+ paste::item!{
+ #[inline]
+ pub fn [<fixed_ $method>](self,other:Self)->Self{
+ //this only needs to be $width+F as u32/64+1 but MUH CONST GENERICS!!!!!
+ let lhs=self.bits.as_::<BInt<{$width*2}>>().shl(F as u32);
+ let rhs=other.bits.as_::<BInt<{$width*2}>>();
+ Self::from_bits(lhs.div_euclid(rhs).as_())
+ }
+ }
+ }
+ #[cfg(all(not(feature="wide-mul"),not(feature="deferred-division")))]
+ impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
+ #[cfg(all(not(feature="wide-mul"),feature="deferred-division"))]
+ impl<const F:usize> ratio_ops::ratio::Divide for $struct<$width,F>{
+ type Output = $output;
+ #[inline]
+ fn divide(self, other: Self) -> Self::Output {
+ paste::item!{
+ self.[<fixed_ $method>](other)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_multiplicative_operator {
+ ( $struct: ident, $trait: ident, $method: ident, $inner_method: ident, $output: ty ) => {
+ impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
+ where
+ BInt::<N>:From<U>+core::ops::$trait,
+ {
+ type Output = $output;
+ #[inline]
+ fn $method(self,other:U)->Self::Output{
+ Self::from_bits(self.bits.$inner_method(BInt::<N>::from(other)))
+ }
+ }
+ };
+}
+macro_rules! impl_multiplicative_assign_operator {
+ ( $struct: ident, $trait: ident, $method: ident, $not_assign_method: ident ) => {
+ impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
+ where
+ BInt::<N>:From<U>+core::ops::$trait,
+ {
+ #[inline]
+ fn $method(&mut self,other:U){
+ self.bits=self.bits.$not_assign_method(BInt::<N>::from(other));
+ }
+ }
+ };
+}
+
+macro_rules! macro_repeated{
+ (
+ $macro:ident,
+ $any:tt,
+ $($repeated:tt),*
+ )=>{
+ $(
+ $macro!($any, $repeated);
+ )*
+ };
+}
+
+macro_rules! macro_16 {
+ ( $macro: ident, $any:tt ) => {
+ macro_repeated!($macro,$any,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
+ }
+}
+
+macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, MulAssign, mul_assign, mul) );
+macro_16!( impl_multiply_operator_not_const_generic, (Fixed, Mul, mul, Self) );
+macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, DivAssign, div_assign, div) );
+macro_16!( impl_divide_operator_not_const_generic, (Fixed, Div, div, Self) );
+impl_multiplicative_assign_operator!( Fixed, MulAssign, mul_assign, mul );
+impl_multiplicative_operator!( Fixed, Mul, mul, mul, Self );
+impl_multiplicative_assign_operator!( Fixed, DivAssign, div_assign, div_euclid );
+impl_multiplicative_operator!( Fixed, Div, div, div_euclid, Self );
+#[cfg(feature="deferred-division")]
+impl<const N:usize,const F:usize> core::ops::Div<Fixed<N,F>> for Fixed<N,F>{
+ type Output=ratio_ops::ratio::Ratio<Fixed<N,F>,Fixed<N,F>>;
+ #[inline]
+ fn div(self, other: Fixed<N,F>)->Self::Output{
+ ratio_ops::ratio::Ratio::new(self,other)
+ }
+}
+#[cfg(feature="deferred-division")]
+impl<const N:usize,const F:usize> ratio_ops::ratio::Parity for Fixed<N,F>{
+ fn parity(&self)->bool{
+ self.is_negative()
+ }
+}
+macro_rules! impl_shift_operator {
+ ( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
+ impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
+ type Output = $output;
+ #[inline]
+ fn $method(self, other: u32) -> Self::Output {
+ Self::from_bits(self.bits.$method(other))
+ }
+ }
+ };
+}
+macro_rules! impl_shift_assign_operator {
+ ( $struct: ident, $trait: ident, $method: ident ) => {
+ impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
+ #[inline]
+ fn $method(&mut self, other: u32) {
+ self.bits.$method(other);
+ }
+ }
+ };
+}
+impl_shift_assign_operator!( Fixed, ShlAssign, shl_assign );
+impl_shift_operator!( Fixed, Shl, shl, Self );
+impl_shift_assign_operator!( Fixed, ShrAssign, shr_assign );
+impl_shift_operator!( Fixed, Shr, shr, Self );
+
+// wide operators. The result width is the sum of the input widths, i.e. none of the multiplication overflows.
+
+#[allow(unused_macros)]
+macro_rules! impl_wide_operators{
+ ($lhs:expr,$rhs:expr)=>{
+ impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
+ #[inline]
+ fn mul(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
+ paste::item!{
+ self.[<wide_mul_ $lhs _ $rhs>](other)
+ }
+ }
+ }
+ #[cfg(not(feature="deferred-division"))]
+ impl core::ops::Div<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
+ #[inline]
+ fn div(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
+ paste::item!{
+ self.[<wide_div_ $lhs _ $rhs>](other)
+ }
+ }
+ }
+ #[cfg(feature="deferred-division")]
+ impl ratio_ops::ratio::Divide<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
+ #[inline]
+ fn divide(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
+ paste::item!{
+ self.[<wide_div_ $lhs _ $rhs>](other)
+ }
+ }
+ }
+ }
+}
+
+// WIDE MUL: multiply into a wider type
+// let a = I32F32::ONE;
+// let b:I64F64 = a.wide_mul_1_1(a);
+macro_rules! impl_wide_not_const_generic{
+ (
+ (),
+ ($lhs:expr,$rhs:expr)
+ )=>{
+ impl Fixed<$lhs,{$lhs*32}>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<wide_mul_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
+ let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>();
+ let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
+ Fixed::from_bits(lhs*rhs)
+ }
+ /// This operation cannot represent the fraction exactly,
+ /// but it shapes the output to have precision for the
+ /// largest and smallest possible fractions.
+ #[inline]
+ pub fn [<wide_div_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
+ // (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
+ let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>().shl($rhs*64);
+ let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
+ Fixed::from_bits(lhs/rhs)
+ }
+ }
+ }
+ #[cfg(feature="wide-mul")]
+ impl_wide_operators!($lhs,$rhs);
+ };
+}
+macro_rules! impl_wide_same_size_not_const_generic{
+ (
+ (),
+ $width:expr
+ )=>{
+ impl Fixed<$width,{$width*32}>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
+ let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
+ let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
+ if self.is_negative()==rhs.is_negative(){
+ Fixed::from_bits(out)
+ }else{
+ // Normal neg is the cheapest negation operation
+ // And the inputs cannot reach the point where it matters
+ Fixed::from_bits(out.neg())
+ }
+ }
+ /// This operation cannot represent the fraction exactly,
+ /// but it shapes the output to have precision for the
+ /// largest and smallest possible fractions.
+ #[inline]
+ pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
+ // (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
+ let lhs=self.bits.as_::<BInt<{$width*2}>>().shl($width*64);
+ let rhs=rhs.bits.as_::<BInt<{$width*2}>>();
+ Fixed::from_bits(lhs/rhs)
+ }
+ }
+ }
+ #[cfg(feature="wide-mul")]
+ impl_wide_operators!($width,$width);
+ };
+}
+
+//const generics sidestepped wahoo
+macro_repeated!(
+ impl_wide_not_const_generic,(),
+ (2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
+ (1,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
+ (1,3),(2,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
+ (1,4),(2,4),(3,4), (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
+ (1,5),(2,5),(3,5),(4,5), (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
+ (1,6),(2,6),(3,6),(4,6),(5,6), (7,6),(8,6),(9,6),(10,6),
+ (1,7),(2,7),(3,7),(4,7),(5,7),(6,7), (8,7),(9,7),
+ (1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8), (9,8),
+ (1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),
+ (1,10),(2,10),(3,10),(4,10),(5,10),(6,10),
+ (1,11),(2,11),(3,11),(4,11),(5,11),
+ (1,12),(2,12),(3,12),(4,12),
+ (1,13),(2,13),(3,13),
+ (1,14),(2,14),
+ (1,15)
+);
+macro_repeated!(
+ impl_wide_same_size_not_const_generic,(),
+ 1,2,3,4,5,6,7,8
+);
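+//e.g. wide_mul_2_3 multiplies Fixed<2,64> by Fixed<3,96> into Fixed<5,160>, so no product bits are lost.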
+
+pub trait Fix<Out>{
+ fn fix(self)->Out;
+}
+
+macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
+ (
+ (),
+ ($lhs:expr,$rhs:expr)
+ )=>{
+ impl Fixed<$lhs,{$lhs*32}>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+ Fixed::from_bits(bnum::cast::As::as_::<BInt<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
+ }
+ }
+ }
+ impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ fn fix(self)->Fixed<$rhs,{$rhs*32}>{
+ paste::item!{
+ self.[<fix_ $rhs>]()
+ }
+ }
+ }
+ }
+}
+macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
+ (
+ (),
+ ($lhs:expr,$rhs:expr)
+ )=>{
+ impl Fixed<$lhs,{$lhs*32}>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+ Fixed::from_bits(bnum::cast::As::as_::<BInt<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
+ }
+ }
+ }
+ impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ fn fix(self)->Fixed<$rhs,{$rhs*32}>{
+ paste::item!{
+ self.[<fix_ $rhs>]()
+ }
+ }
+ }
+ }
+}
+macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
+ (
+ (),
+ ($lhs:expr,$rhs:expr)
+ )=>{
+ impl Fixed<$lhs,{$lhs*32}>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+ self
+ }
+ }
+ }
+ impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+ fn fix(self)->Fixed<$rhs,{$rhs*32}>{
+ paste::item!{
+ self.[<fix_ $rhs>]()
+ }
+ }
+ }
+ }
+}
+
+// I LOVE NOT BEING ABLE TO USE CONST GENERICS
+
+macro_repeated!(
+ impl_fix_rhs_lt_lhs_not_const_generic,(),
+ (2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
+ (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
+ (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
+ (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
+ (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
+ (7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
+ (8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
+ (9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
+ (10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
+ (11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
+ (12,11),(13,11),(14,11),(15,11),(16,11),
+ (13,12),(14,12),(15,12),(16,12),
+ (14,13),(15,13),(16,13),
+ (15,14),(16,14),
+ (16,15)
+);
+macro_repeated!(
+ impl_fix_lhs_lt_rhs_not_const_generic,(),
+ (1,2),
+ (1,3),(2,3),
+ (1,4),(2,4),(3,4),
+ (1,5),(2,5),(3,5),(4,5),
+ (1,6),(2,6),(3,6),(4,6),(5,6),
+ (1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
+ (1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
+ (1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
+ (1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
+ (1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
+ (1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
+ (1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
+ (1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
+ (1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
+ (1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
+);
+macro_repeated!(
+ impl_fix_lhs_eq_rhs_not_const_generic,(),
+ (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
+);
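+//e.g. I32F32::ONE.fix_8() widens to I256F256::ONE; I256F256::ONE.fix_1() truncates back down (see the tests).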
+
+macro_rules! impl_not_const_generic{
+ ($n:expr,$_2n:expr)=>{
+ impl Fixed<$n,{$n*32}>{
+ paste::item!{
+ #[inline]
+ pub fn sqrt_unchecked(self)->Self{
+ //1. count the "used" bits above the binary point; 1<<max_shift bounds the root
+ //2. halve that count via >>1 (sqrt-ish)
+ //3. add on fractional offset
+ //Voila
+ let used_bits=self.bits.bits() as i32-1-($n*32) as i32;
+ let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
+ let mut result=Self::ZERO;
+
+ //resize self to match the wide mul output
+ let wide_self=self.[<fix_ $_2n>]();
+ //descend down the bits and check if flipping each bit would push the square over the input value
+ for shift in (0..=max_shift).rev(){
+ let new_result={
+ let mut bits=result.to_bits().to_bits();
+ bits.set_bit(shift,true);
+ Self::from_bits(BInt::from_bits(bits))
+ };
+ if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
+ result=new_result;
+ }
+ }
+ result
+ }
+ }
+ #[inline]
+ pub fn sqrt(self)->Self{
+ if self<Self::ZERO{
+ panic!("Square root of a negative number")
+ }else{
+ self.sqrt_unchecked()
+ }
+ }
+ #[inline]
+ pub fn sqrt_checked(self)->Option<Self>{
+ if self<Self::ZERO{
+ None
+ }else{
+ Some(self.sqrt_unchecked())
+ }
+ }
+ }
+ };
+}
+impl_not_const_generic!(1,2);
+impl_not_const_generic!(2,4);
+impl_not_const_generic!(3,6);
+impl_not_const_generic!(4,8);
+impl_not_const_generic!(5,10);
+impl_not_const_generic!(6,12);
+impl_not_const_generic!(7,14);
+impl_not_const_generic!(8,16);
diff --git a/lib/fixed_wide/src/lib.rs b/lib/fixed_wide/src/lib.rs
new file mode 100644
--- /dev/null
+++ b/lib/fixed_wide/src/lib.rs
@@ -0,0 +1,8 @@
+pub mod fixed;
+pub mod types;
+
+#[cfg(feature="zeroes")]
+pub mod zeroes;
+
+#[cfg(test)]
+mod tests;
diff --git a/lib/fixed_wide/src/tests.rs b/lib/fixed_wide/src/tests.rs
new file mode 100644
--- /dev/null
+++ b/lib/fixed_wide/src/tests.rs
@@ -0,0 +1,212 @@
+use crate::types::{I32F32,I256F256};
+
+#[test]
+fn to_f32(){
+ let a=I256F256::from(1)>>2;
+ let f:f32=a.into();
+ assert_eq!(f,0.25f32);
+ let f:f32=(-a).into();
+ assert_eq!(f,-0.25f32);
+ let a=I256F256::from(0);
+ let f:f32=(-a).into();
+ assert_eq!(f,0f32);
+ let a=I256F256::from(237946589723468975i64)<<16;
+ let f:f32=a.into();
+ assert_eq!(f,237946589723468975f32*2.0f32.powi(16));
+}
+
+#[test]
+fn to_f64(){
+ let a=I256F256::from(1)>>2;
+ let f:f64=a.into();
+ assert_eq!(f,0.25f64);
+ let f:f64=(-a).into();
+ assert_eq!(f,-0.25f64);
+ let a=I256F256::from(0);
+ let f:f64=(-a).into();
+ assert_eq!(f,0f64);
+ let a=I256F256::from(237946589723468975i64)<<16;
+ let f:f64=a.into();
+ assert_eq!(f,237946589723468975f64*2.0f64.powi(16));
+}
+
+#[test]
+fn from_f32(){
+ let a=I256F256::from(1)>>2;
+ let b:Result<I256F256,_>=0.25f32.try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(-1)>>2;
+ let b:Result<I256F256,_>=(-0.25f32).try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(0);
+ let b:Result<I256F256,_>=0.try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
+ let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f32*2.0f32.powi(16)).try_into();
+ assert_eq!(b,Ok(a));
+ //I32F32::MAX into f32 is truncated into this value
+ let a=I32F32::raw(0b111111111111111111111111000000000000000000000000000000000000000i64);
+ let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MAX).try_into();
+ assert_eq!(b,Ok(a));
+ //I32F32::MIN hits a special case since it's not representable as a positive signed integer
+ //TODO: don't return an overflow because this is technically possible
+ let a=I32F32::MIN;
+ let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into();
+ assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
+ //16 is within the 24 bits of float precision
+ let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.fix_2()).try_into();
+ assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
+ let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into();
+ assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow));
+ //test many cases
+ for i in 0..64{
+ let a=crate::fixed::Fixed::<2,64>::raw_digit(0b111111111111111111111111000000000000000000000000000000000000000i64)<<i;
+ let f:f32=a.into();
+ let b:Result<crate::fixed::Fixed<2,64>,_>=f.try_into();
+ assert_eq!(b,Ok(a));
+ }
+}
+
+#[test]
+fn from_f64(){
+ let a=I256F256::from(1)>>2;
+ let b:Result<I256F256,_>=0.25f64.try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(-1)>>2;
+ let b:Result<I256F256,_>=(-0.25f64).try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(0);
+ let b:Result<I256F256,_>=0.try_into();
+ assert_eq!(b,Ok(a));
+ let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
+ let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f64*2.0f64.powi(16)).try_into();
+ assert_eq!(b,Ok(a));
+}
+
+#[test]
+fn you_can_shr_numbers(){
+ let a=I32F32::from(4);
+ assert_eq!(a>>1,I32F32::from(2));
+}
+
+#[test]
+fn test_wide_mul(){
+ let a=I32F32::ONE;
+ let aa=a.wide_mul_1_1(a);
+ assert_eq!(aa,crate::types::I64F64::ONE);
+}
+
+#[test]
+fn test_wide_div(){
+ let a=I32F32::ONE*4;
+ let b=I32F32::ONE*2;
+ let wide_a=a.wide_mul_1_1(I32F32::ONE);
+ let wide_b=b.wide_mul_1_1(I32F32::ONE);
+ let ab=a.wide_div_1_1(b);
+ assert_eq!(ab,crate::types::I64F64::ONE*2);
+ let wab=wide_a.wide_div_2_1(b);
+ assert_eq!(wab,crate::fixed::Fixed::<3,96>::ONE*2);
+ let awb=a.wide_div_1_2(wide_b);
+ assert_eq!(awb,crate::fixed::Fixed::<3,96>::ONE*2);
+}
+
+#[test]
+fn test_wide_mul_repeated() {
+ let a=I32F32::from(2);
+ let b=I32F32::from(3);
+
+ let w1=a.wide_mul_1_1(b);
+ let w2=w1.wide_mul_2_2(w1);
+ let w3=w2.wide_mul_4_4(w2);
+
+ assert_eq!(w3,I256F256::from((3i128*2).pow(4)));
+}
+
+#[test]
+fn test_bint(){
+ let a=I32F32::ONE;
+ assert_eq!(a*2,I32F32::from(2));
+}
+
+#[test]
+fn test_fix(){
+ assert_eq!(I32F32::ONE.fix_8(),I256F256::ONE);
+ assert_eq!(I32F32::ONE,I256F256::ONE.fix_1());
+ assert_eq!(I32F32::NEG_ONE.fix_8(),I256F256::NEG_ONE);
+ assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.fix_1());
+}
+#[test]
+fn test_sqrt(){
+ let a=I32F32::ONE*4;
+ assert_eq!(a.sqrt(),I32F32::from(2));
+}
+#[test]
+fn test_sqrt_zero(){
+ let a=I32F32::ZERO;
+ assert_eq!(a.sqrt(),I32F32::ZERO);
+}
+#[test]
+fn test_sqrt_low(){
+ let a=I32F32::HALF;
+ let b=a.fixed_mul(a);
+ assert_eq!(b.sqrt(),a);
+}
+fn find_equiv_sqrt_via_f64(n:I32F32)->I32F32{
+ //GIMME THEM BITS BOY
+ let &[bits]=n.to_bits().to_bits().digits();
+ let ibits=bits as i64;
+ let f=(ibits as f64)/((1u64<<32) as f64);
+ let f_ans=f.sqrt();
+ let i=(f_ans*((1u64<<32) as f64)) as i64;
+ let r=I32F32::from_bits(bnum::BInt::<1>::from(i));
+ //mimic the behaviour of the algorithm,
+ //return the result if it truncates to the exact answer
+ if (r+I32F32::EPSILON).wide_mul_1_1(r+I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
+ return r+I32F32::EPSILON;
+ }
+ if (r-I32F32::EPSILON).wide_mul_1_1(r-I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
+ return r-I32F32::EPSILON;
+ }
+ return r;
+}
+fn test_exact(n:I32F32){
+ assert_eq!(n.sqrt(),find_equiv_sqrt_via_f64(n));
+}
+#[test]
+fn test_sqrt_exact(){
+ //43
+ for i in 0..((i64::MAX as f32).ln() as u32){
+ let n=I32F32::from_bits(bnum::BInt::<1>::from((i as f32).exp() as i64));
+ test_exact(n);
+ }
+}
+#[test]
+fn test_sqrt_max(){
+ let a=I32F32::MAX;
+ test_exact(a);
+}
+#[test]
+#[cfg(all(feature="zeroes",not(feature="deferred-division")))]
+fn test_zeroes_normal(){
+ // (x-1)*(x+1)
+ // x^2-1
+ let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
+ assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE,I32F32::ONE]));
+ let zeroes=I32F32::zeroes2(I32F32::NEG_ONE*3,I32F32::ONE*2,I32F32::ONE);
+ assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE*3,I32F32::ONE]));
+}
+#[test]
+#[cfg(all(feature="zeroes",feature="deferred-division"))]
+fn test_zeroes_deferred_division(){
+ // (x-1)*(x+1)
+ // x^2-1
+ let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
+ assert_eq!(
+ zeroes,
+ arrayvec::ArrayVec::from_iter([
+ ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::NEG_ONE*2),
+ ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::ONE*2),
+ ])
+ );
+}
diff --git a/lib/fixed_wide/src/types.rs b/lib/fixed_wide/src/types.rs
new file mode 100644
index 0000000..8694435
--- /dev/null
+++ b/lib/fixed_wide/src/types.rs
@@ -0,0 +1,4 @@
+pub type I32F32=crate::fixed::Fixed<1,32>;
+pub type I64F64=crate::fixed::Fixed<2,64>;
+pub type I128F128=crate::fixed::Fixed<4,128>;
+pub type I256F256=crate::fixed::Fixed<8,256>;
diff --git a/lib/fixed_wide/src/zeroes.rs b/lib/fixed_wide/src/zeroes.rs
new file mode 100644
index 0000000..7f1dbc9
--- /dev/null
+++ b/lib/fixed_wide/src/zeroes.rs
@@ -0,0 +1,53 @@
+use crate::fixed::Fixed;
+
+use arrayvec::ArrayVec;
+use std::cmp::Ordering;
+macro_rules! impl_zeroes{
+ ($n:expr)=>{
+ impl Fixed<$n,{$n*32}>{
+ #[inline]
+ pub fn zeroes2(a0:Self,a1:Self,a2:Self)->ArrayVec<<Self as core::ops::Div>::Output,2>{
+ let a2pos=match a2.cmp(&Self::ZERO){
+ Ordering::Greater=>true,
+ Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1).into_iter()),
+ Ordering::Less=>false,
+ };
+ let radicand=a1*a1-a2*a0*4;
+ match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
+ Ordering::Greater=>{
+ paste::item!{
+ let planar_radicand=radicand.sqrt().[<fix_ $n>]();
+ }
+ //sort roots ascending and avoid taking the difference of large numbers
+ let zeroes=match (a2pos,Self::ZERO<a1){
+ (true, true )=>[(-a1-planar_radicand)/(a2*2),(a0*2)/(-a1-planar_radicand)],
+ (true, false)=>[(a0*2)/(-a1+planar_radicand),(-a1+planar_radicand)/(a2*2)],
+ (false,true )=>[(a0*2)/(-a1-planar_radicand),(-a1-planar_radicand)/(a2*2)],
+ (false,false)=>[(-a1+planar_radicand)/(a2*2),(a0*2)/(-a1+planar_radicand)],
+ };
+ ArrayVec::from_iter(zeroes)
+ },
+ Ordering::Equal=>ArrayVec::from_iter([(a1)/(a2*-2)]),
+ Ordering::Less=>ArrayVec::new_const(),
+ }
+ }
+ #[inline]
+ pub fn zeroes1(a0:Self,a1:Self)->ArrayVec<<Self as core::ops::Div>::Output,1>{
+ if a1==Self::ZERO{
+ ArrayVec::new_const()
+ }else{
+ ArrayVec::from_iter([(-a0)/(a1)])
+ }
+ }
+ }
+ };
+}
+impl_zeroes!(1);
+impl_zeroes!(2);
+impl_zeroes!(3);
+impl_zeroes!(4);
+//sqrt doubles twice!
+//impl_zeroes!(5);
+//impl_zeroes!(6);
+//impl_zeroes!(7);
+//impl_zeroes!(8);
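+//zeroes2 finds the real roots of a2*x^2+a1*x+a0==0.
+//The two algebraically equal forms x=(-a1+-sqrt(a1^2-4*a2*a0))/(2*a2)
+//and x=(2*a0)/(-a1-+sqrt(a1^2-4*a2*a0)) are chosen per root so the
+//numerator never subtracts two nearly equal quantities.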
diff --git a/lib/linear_ops/.gitignore b/lib/linear_ops/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/lib/linear_ops/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/lib/linear_ops/Cargo.lock b/lib/linear_ops/Cargo.lock
new file mode 100644
index 0000000..649a61c
--- /dev/null
+++ b/lib/linear_ops/Cargo.lock
@@ -0,0 +1,36 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "bnum"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50202def95bf36cb7d1d7a7962cea1c36a3f8ad42425e5d2b71d7acb8041b5b8"
+
+[[package]]
+name = "fixed_wide"
+version = "0.1.1"
+dependencies = [
+ "bnum",
+ "paste",
+]
+
+[[package]]
+name = "linear_ops"
+version = "0.1.0"
+dependencies = [
+ "fixed_wide",
+ "paste",
+ "ratio_ops",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+
+[[package]]
+name = "ratio_ops"
+version = "0.1.0"
diff --git a/lib/linear_ops/Cargo.toml b/lib/linear_ops/Cargo.toml
new file mode 100644
index 0000000..e229c97
--- /dev/null
+++ b/lib/linear_ops/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "linear_ops"
+version = "0.1.0"
+edition = "2021"
+repository = "https://git.itzana.me/StrafesNET/fixed_wide_vectors"
+license = "MIT OR Apache-2.0"
+description = "Vector/Matrix operations using trait bounds."
+authors = ["Rhys Lloyd "]
+
+[features]
+default=["named-fields","fixed-wide"]
+named-fields=[]
+fixed-wide=["dep:fixed_wide","dep:paste"]
+deferred-division=["dep:ratio_ops"]
+
+[dependencies]
+ratio_ops = { version = "0.1.0", path = "../ratio_ops", registry = "strafesnet", optional = true }
+fixed_wide = { version = "0.1.0", path = "../fixed_wide", registry = "strafesnet", optional = true }
+paste = { version = "1.0.15", optional = true }
+
+[dev-dependencies]
+fixed_wide = { version = "0.1.0", path = "../fixed_wide", registry = "strafesnet", features = ["wide-mul"] }
diff --git a/lib/linear_ops/LICENSE-APACHE b/lib/linear_ops/LICENSE-APACHE
new file mode 100644
index 0000000..a7e77cb
--- /dev/null
+++ b/lib/linear_ops/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
\ No newline at end of file
diff --git a/lib/linear_ops/LICENSE-MIT b/lib/linear_ops/LICENSE-MIT
new file mode 100644
index 0000000..468cd79
--- /dev/null
+++ b/lib/linear_ops/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/lib/linear_ops/src/lib.rs b/lib/linear_ops/src/lib.rs
new file mode 100644
index 0000000..628ca4e
--- /dev/null
+++ b/lib/linear_ops/src/lib.rs
@@ -0,0 +1,10 @@
+mod macros;
+pub mod types;
+pub mod vector;
+pub mod matrix;
+
+#[cfg(feature="named-fields")]
+mod named;
+
+#[cfg(test)]
+mod tests;
diff --git a/lib/linear_ops/src/macros/common.rs b/lib/linear_ops/src/macros/common.rs
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/lib/linear_ops/src/macros/common.rs
@@ -0,0 +1 @@
+
diff --git a/lib/linear_ops/src/macros/fixed_wide.rs b/lib/linear_ops/src/macros/fixed_wide.rs
new file mode 100644
index 0000000..199e7fb
--- /dev/null
+++ b/lib/linear_ops/src/macros/fixed_wide.rs
@@ -0,0 +1,79 @@
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_fixed_wide_vector_not_const_generic {
+ (
+ (),
+ $n:expr
+ ) => {
+ impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$n,{$n*32}>>{
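+ // length_squared() uses the widening Mul, so length() returns the
+ // double-width Fixed type; sqrt_unchecked is used on the assumption that
+ // its precondition (a non-negative input) is always met by a sum of squares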
+ #[inline]
+ pub fn length(self)-><fixed_wide::fixed::Fixed<$n,{$n*32}> as core::ops::Mul>::Output{
+ self.length_squared().sqrt_unchecked()
+ }
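+ // scale self so its length equals `length`: multiply first (widening),
+ // then divide by the current length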
+ #[inline]
+ pub fn with_length<U,V>(self,length:U)-><Vector<N,V> as core::ops::Div<<fixed_wide::fixed::Fixed<$n,{$n*32}> as core::ops::Mul>::Output>>::Output
+ where
+ fixed_wide::fixed::Fixed<$n,{$n*32}>:core::ops::Mul<U,Output=V>,
+ U:Copy,
+ V:core::ops::Div<<fixed_wide::fixed::Fixed<$n,{$n*32}> as core::ops::Mul>::Output>,
+ {
+ self*length/self.length()
+ }
+ }
+ };
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! macro_4 {
+ ( $macro: ident, $any:tt ) => {
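+ // expand $macro once each with the values 1, 2, 3 and 4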
+ $crate::macro_repeated!($macro,$any,1,2,3,4);
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_fixed_wide_vector {
+ () => {
+ $crate::macro_4!(impl_fixed_wide_vector_not_const_generic,());
+ // I LOVE NOT BEING ABLE TO USE CONST GENERICS
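+ // Each (lhs,rhs) pair below expands impl_fix_not_const_generic once,
+ // covering every conversion between Fixed word counts 1..=16 (256 impls),
+ // since the destination width cannot be expressed as a const generic here.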
+ $crate::macro_repeated!(
+ impl_fix_not_const_generic,(),
+ (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
+ (1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
+ (1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
+ (1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
+ (1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
+ (1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
+ (1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
+ (1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
+ (1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),(9,9),(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
+ (1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),(10,10),(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
+ (1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),(11,11),(12,11),(13,11),(14,11),(15,11),(16,11),
+ (1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),(12,12),(13,12),(14,12),(15,12),(16,12),
+ (1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),(13,13),(14,13),(15,13),(16,13),
+ (1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),(14,14),(15,14),(16,14),
+ (1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),(15,15),(16,15),
+ (1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),(16,16)
+ );
+ };
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_fix_not_const_generic{
+ (
+ (),
+ ($lhs:expr,$rhs:expr)
+ )=>{
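+ // generates Vector::fix_$rhs(), converting each lane from
+ // Fixed<$lhs,{$lhs*32}> to Fixed<$rhs,{$rhs*32}> via the scalar fix_$rhs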
+ impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>
+ {
+ paste::item!{
+ #[inline]
+ pub fn [<fix_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
+ self.map(|t|t.[<fix_ $rhs>]())
+ }
+ }
+ }
+ }
+}
diff --git a/lib/linear_ops/src/macros/matrix.rs b/lib/linear_ops/src/macros/matrix.rs
new file mode 100644
index 0000000..69db874
--- /dev/null
+++ b/lib/linear_ops/src/macros/matrix.rs
@@ -0,0 +1,272 @@
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix {
+ () => {
+ impl<const X:usize,const Y:usize,T> Matrix<X,Y,T>{
+ #[inline(always)]
+ pub const fn new(array:[[T;Y];X])->Self{
+ Self{array}
+ }
+ #[inline(always)]
+ pub fn to_array(self)->[[T;Y];X]{
+ self.array
+ }
+ #[inline]
+ pub fn from_cols(cols:[Vector<Y,T>;X])->Self
+ {
+ Matrix::new(
+ cols.map(|col|col.array),
+ )
+ }
+ #[inline]
+ pub fn map<F,U>(self,f:F)->Matrix<X,Y,U>
+ where
+ F:Fn(T)->U
+ {
+ Matrix::new(
+ self.array.map(|inner|inner.map(&f)),
+ )
+ }
+ #[inline]
+ pub fn transpose(self)->Matrix<Y,X,T>{
+ //how did I think of this
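+ // turn each column into an iterator, then repeatedly take one element
+ // from every iterator: the n-th pass yields the n-th transposed column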
+ let mut array_of_iterators=self.array.map(|axis|axis.into_iter());
+ Matrix::new(
+ core::array::from_fn(|_|
+ array_of_iterators.each_mut().map(|iter|
+ iter.next().unwrap()
+ )
+ )
+ )
+ }
+ #[inline]
+ // old (list of rows) MatY.MatX = MatY
+ // new (list of columns) MatX.MatZ = MatZ
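+ // out[z][y] = sum over x of self[x][y]*rhs[z][x]; self's column iterators
+ // cycle so they restart automatically for each column of rhs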
+ pub fn dot<const Z:usize,U,V>(self,rhs:Matrix<Z,X,U>)->Matrix<Z,Y,V>
+ where
+ T:core::ops::Mul<U,Output=V>+Copy,
+ V:core::iter::Sum,
+ U:Copy,
+ {
+ let mut array_of_iterators=self.array.map(|axis|axis.into_iter().cycle());
+ Matrix{
+ array:rhs.array.map(|rhs_axis|
+ core::array::from_fn(|_|
+ array_of_iterators
+ .iter_mut()
+ .zip(rhs_axis.iter())
+ .map(|(lhs_iter,&rhs_value)|
+ lhs_iter.next().unwrap()*rhs_value
+ ).sum()
+ )
+ )
+ }
+ }
+ #[inline]
+ // MatX.VecY = VecX
+ pub fn transform_vector<U,V>(self,rhs:Vector<X,U>)->Vector<Y,V>
+ where
+ T:core::ops::Mul<U,Output=V>,
+ V:core::iter::Sum,
+ U:Copy,
+ {
+ let mut array_of_iterators=self.array.map(|axis|axis.into_iter());
+ Vector::new(
+ core::array::from_fn(|_|
+ array_of_iterators
+ .iter_mut()
+ .zip(rhs.array.iter())
+ .map(|(lhs_iter,&rhs_value)|
+ lhs_iter.next().unwrap()*rhs_value
+ ).sum()
+ )
+ )
+ }
+ }
+ impl<const X:usize,const Y:usize,T> Matrix<X,Y,T>
+ where
+ T:Copy
+ {
+ #[inline(always)]
+ pub const fn from_value(value:T)->Self{
+ Self::new([[value;Y];X])
+ }
+ }
+
+ impl<const X:usize,const Y:usize,T:Default> Default for Matrix<X,Y,T>{
+ #[inline]
+ fn default()->Self{
+ Self::new(
+ core::array::from_fn(|_|core::array::from_fn(|_|Default::default()))
+ )
+ }
+ }
+
+ impl<const X:usize,const Y:usize,T:core::fmt::Display> core::fmt::Display for Matrix<X,Y,T>{
+ #[inline]
+ fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
+ for col in &self.array[0..X]{
+ core::write!(f,"\n")?;
+ for elem in &col[0..Y-1]{
+ core::write!(f,"{}, ",elem)?;
+ }
+ // assume we will be using matrices of size 1x1 or greater
+ core::write!(f,"{}",col.last().unwrap())?;
+ }
+ Ok(())
+ }
+ }
+
+ impl<const X:usize,const Y:usize,const Z:usize,T,U,V> core::ops::Mul<Matrix<Z,X,U>> for Matrix<X,Y,T>
+ where
+ T:core::ops::Mul<U,Output=V>+Copy,
+ V:core::iter::Sum,
+ U:Copy,
+ {
+ type Output=Matrix<Z,Y,V>;
+ #[inline]
+ fn mul(self,rhs:Matrix<Z,X,U>)->Self::Output{
+ self.dot(rhs)
+ }
+ }
+ impl<const X:usize,const Y:usize,T,U,V> core::ops::Mul<Vector<X,U>> for Matrix<X,Y,T>
+ where
+ T:core::ops::Mul<U,Output=V>,
+ V:core::iter::Sum,
+ U:Copy,
+ {
+ type Output=Vector<Y,V>;
+ #[inline]
+ fn mul(self,rhs:Vector<X,U>)->Self::Output{
+ self.transform_vector(rhs)
+ }
+ }
+ #[cfg(feature="deferred-division")]
+ $crate::impl_matrix_deferred_division!();
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_deferred_division {
+ () => {
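+ // Div on a Matrix is lazy under this feature: it returns a Ratio holding
+ // numerator and denominator, and Divide performs the actual elementwise
+ // division later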
+ impl<const X:usize,const Y:usize,T:ratio_ops::ratio::Divide<U,Output=V>,U:Copy,V> ratio_ops::ratio::Divide<U> for Matrix<X,Y,T>{
+ type Output=Matrix<X,Y,V>;
+ #[inline]
+ fn divide(self,rhs:U)->Self::Output{
+ self.map(|t|t.divide(rhs))
+ }
+ }
+ impl<const X:usize,const Y:usize,T,U> core::ops::Div<U> for Matrix<X,Y,T>{
+ type Output=ratio_ops::ratio::Ratio<Matrix<X,Y,T>,U>;
+ #[inline]
+ fn div(self,rhs:U)->Self::Output{
+ ratio_ops::ratio::Ratio::new(self,rhs)
+ }
+ }
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_extend {
+ ( $x: expr, $y: expr ) => {
+ impl<T> Matrix<$x,$y,T>{
+ #[inline]
+ pub fn extend_column(self,value:Vector<$y,T>)->Matrix<{$x+1},$y,T>{
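+ // chain the new column after the existing X columns, then collect into
+ // an array one column wider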
+ let mut iter=self.array.into_iter().chain(core::iter::once(value.array));
+ Matrix::new(
+ core::array::from_fn(|_|iter.next().unwrap()),
+ )
+ }
+ #[inline]
+ pub fn extend_row(self,value:Vector<$x,T>)->Matrix<$x,{$y+1},T>{
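+ // append one element of `value` to the end of each existing column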
+ let mut iter_rows=value.array.into_iter();
+ Matrix::new(
+ self.array.map(|axis|{
+ let mut elements_iter=axis.into_iter().chain(core::iter::once(iter_rows.next().unwrap()));
+ core::array::from_fn(|_|elements_iter.next().unwrap())
+ })
+ )
+ }
+ }
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_named_fields_shape {
+ (
+ ($struct_outer:ident, $size_outer: expr),
+ ($size_inner: expr)
+ ) => {
+ impl<T> core::ops::Deref for Matrix<$size_outer,$size_inner,T>{
+ type Target=$struct_outer<Vector<$size_inner,T>>;
+ #[inline]
+ fn deref(&self)->&Self::Target{
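+ // SAFETY: assumes $struct_outer<Vector<$size_inner,T>> has the same
+ // layout as [[T;$size_inner];$size_outer]; the named module must uphold this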
+ unsafe{core::mem::transmute(&self.array)}
+ }
+ }
+ impl<T> core::ops::DerefMut for Matrix<$size_outer,$size_inner,T>{
+ #[inline]
+ fn deref_mut(&mut self)->&mut Self::Target{
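+ // SAFETY: same layout assumption as Deref above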
+ unsafe{core::mem::transmute(&mut self.array)}
+ }
+ }
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_named_fields_shape_shim {
+ (
+ ($($vector_info:tt),+),
+ $matrix_info:tt
+ ) => {
+ $crate::macro_repeated!(impl_matrix_named_fields_shape,$matrix_info,$($vector_info),+);
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_named_fields {
+ (
+ ($($matrix_info:tt),+),
+ $vector_infos:tt
+ ) => {
+ $crate::macro_repeated!(impl_matrix_named_fields_shape_shim,$vector_infos,$($matrix_info),+);
+ }
+}
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_matrix_3x3 {
+ ()=>{
+ impl<T,T2,T3> Matrix<3,3,T>
+ where
+ //cross
+ T:core::ops::Mul<T,Output=T2>+Copy,
+ T2:core::ops::Sub,
+ //dot
+ T:core::ops::Mul<<T2 as core::ops::Sub>::Output,Output=T3>,
+ T3:core::iter::Sum,
+ {
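+ // determinant as the scalar triple product x_axis . (y_axis x z_axis)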
+ pub fn det(self)->T3{
+ self.x_axis.dot(self.y_axis.cross(self.z_axis))
+ }
+ }
+ impl<T,T2> Matrix<3,3,T>
+ where
+ T:core::ops::Mul<T,Output=T2>+Copy,
+ T2:core::ops::Sub,
+ {
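+ // transposed cofactor matrix: adjugate()*self == det()*identity, so an
+ // inverse can be formed as adjugate()/det() when the determinant is nonzero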
+ pub fn adjugate(self)->Matrix<3,3,<T2 as core::ops::Sub>::Output>{
+ Matrix::new([
+ [self.y_axis.y*self.z_axis.z-self.y_axis.z*self.z_axis.y,self.x_axis.z*self.z_axis.y-self.x_axis.y*self.z_axis.z,self.x_axis.y*self.y_axis.z-self.x_axis.z*self.y_axis.y],
+ [self.y_axis.z*self.z_axis.x-self.y_axis.x*self.z_axis.z,self.x_axis.x*self.z_axis.z-self.x_axis.z*self.z_axis.x,self.x_axis.z*self.y_axis.x-self.x_axis.x*self.y_axis.z],
+ [self.y_axis.x*self.z_axis.y-self.y_axis.y*self.z_axis.x,self.x_axis.y*self.z_axis.x-self.x_axis.x*self.z_axis.y,self.x_axis.x*self.y_axis.y-self.x_axis.y*self.y_axis.x],
+ ])
+ }
+ }
+ }
+}
diff --git a/lib/linear_ops/src/macros/mod.rs b/lib/linear_ops/src/macros/mod.rs
new file mode 100644
index 0000000..e0a9d02
--- /dev/null
+++ b/lib/linear_ops/src/macros/mod.rs
@@ -0,0 +1,20 @@
+pub mod common;
+pub mod vector;
+pub mod matrix;
+
+#[cfg(feature="fixed-wide")]
+pub mod fixed_wide;
+
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! macro_repeated{
+ (
+ $macro:ident,
+ $any:tt,
+ $($repeated:tt),*
+ )=>{
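+ // invoke $macro!($any,$repeated) once per repeated token tree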
+ $(
+ $crate::$macro!($any, $repeated);
+ )*
+ };
+}
diff --git a/lib/linear_ops/src/macros/vector.rs b/lib/linear_ops/src/macros/vector.rs
new file mode 100644
index 0000000..d3beecf
--- /dev/null
+++ b/lib/linear_ops/src/macros/vector.rs
@@ -0,0 +1,357 @@
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_vector {
+ () => {
+ impl<const N:usize,T> Vector<N,T>{
+ #[inline(always)]
+ pub const fn new(array:[T;N])->Self{
+ Self{array}
+ }
+ #[inline(always)]
+ pub fn to_array(self)->[T;N]{
+ self.array
+ }
+ #[inline]
+ pub fn map<F,U>(self,f:F)->Vector<N,U>
+ where
+ F:Fn(T)->U
+ {
+ Vector::new(
+ self.array.map(f)
+ )
+ }
+ #[inline]
+ pub fn map_zip<F,U,V>(self,other:Vector<N,U>,f:F)->Vector<N,V>
+ where
+ F:Fn((T,U))->V,
+ {
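+ // both arrays have length N, so the zipped iterator yields exactly N
+ // pairs and the unwrap in from_fn cannot fail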
+ let mut iter=self.array.into_iter().zip(other.array);
+ Vector::new(
+ core::array::from_fn(|_|f(iter.next().unwrap())),
+ )
+ }
+ }
+ impl<const N:usize,T:Copy> Vector<N,T>{
+ #[inline(always)]
+ pub const fn from_value(value:T)->Self{
+ Self::new([value;N])
+ }
+ }
+
+ impl<const N:usize,T:Default> Default for Vector<N,T>{
+ #[inline]
+ fn default()->Self{
+ Self::new(
+ core::array::from_fn(|_|Default::default())
+ )
+ }
+ }
+
+ impl<const N:usize,T:core::fmt::Display> core::fmt::Display for Vector<N,T>{
+ #[inline]
+ fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
+ for elem in &self.array[0..N-1]{
+ core::write!(f,"{}, ",elem)?;
+ }
+ // assume we will be using vectors of length 1 or greater
+ core::write!(f,"{}",self.array.last().unwrap())
+ }
+ }
+
+ impl