diff --git a/lib/fixed_wide/src/fixed.rs b/lib/fixed_wide/src/fixed.rs
index 16f721dd6..f1a0cafa2 100644
--- a/lib/fixed_wide/src/fixed.rs
+++ b/lib/fixed_wide/src/fixed.rs
@@ -663,74 +663,94 @@ macro_repeated!(
 	1,2,3,4,5,6,7,8
 );
 
-pub trait Fix<Out>{
-	fn fix(self)->Out;
+#[derive(Debug,Eq,PartialEq)]
+pub enum NarrowError{
+	Overflow,
+	Underflow,
 }
 
-macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
+pub trait Wrap<Output>{
+	fn wrap(self)->Output;
+}
+pub trait Clamp<Output>{
+	fn clamp(self)->Output;
+}
+impl<const N:usize,const F:usize> Clamp<Fixed<N,F>> for Result<Fixed<N,F>,NarrowError>{
+	fn clamp(self)->Fixed<N,F>{
+		match self{
+			Ok(fixed)=>fixed,
+			Err(NarrowError::Overflow)=>Fixed::MAX,
+			Err(NarrowError::Underflow)=>Fixed::MIN,
+		}
+	}
+}
+
+macro_rules! impl_narrow_not_const_generic{
 	(
 		(),
 		($lhs:expr,$rhs:expr)
 	)=>{
-		impl Fixed<$lhs,{$lhs*32}>
-		{
-			paste::item!{
+		paste::item!{
+			impl Fixed<$lhs,{$lhs*32}>
+			{
 				#[inline]
-				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+				pub fn [<wrap_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
 					Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
 				}
+				#[inline]
+				pub fn [<narrow_ $rhs>](self)->Result<Fixed<$rhs,{$rhs*32}>,NarrowError>{
+					if Fixed::<$rhs,{$rhs*32}>::MAX.[<widen_ $lhs>]().bits<self.bits{
+						return Err(NarrowError::Overflow);
+					}
+					if self.bits<Fixed::<$rhs,{$rhs*32}>::MIN.[<widen_ $lhs>]().bits{
+						return Err(NarrowError::Underflow);
+					}
+					Ok(self.[<wrap_ $rhs>]())
+				}
+				#[inline]
+				pub fn [<clamp_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+					self.[<narrow_ $rhs>]().clamp()
+				}
 			}
-		}
-		impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
-			fn fix(self)->Fixed<$rhs,{$rhs*32}>{
-				paste::item!{
-				self.[<fix_ $rhs>]()
+			impl Wrap<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+				#[inline]
+				fn wrap(self)->Fixed<$rhs,{$rhs*32}>{
+					self.[<wrap_ $rhs>]()
+				}
+			}
+			impl TryFrom<Fixed<$lhs,{$lhs*32}>> for Fixed<$rhs,{$rhs*32}>{
+				type Error=NarrowError;
+				#[inline]
+				fn try_from(value:Fixed<$lhs,{$lhs*32}>)->Result<Fixed<$rhs,{$rhs*32}>,Self::Error>{
+					value.[<narrow_ $rhs>]()
+				}
+			}
+			impl Clamp<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
+				#[inline]
+				fn clamp(self)->Fixed<$rhs,{$rhs*32}>{
+					self.[<clamp_ $rhs>]()
 				}
 			}
 		}
 	}
 }
-macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
+macro_rules! impl_widen_not_const_generic{
 	(
 		(),
 		($lhs:expr,$rhs:expr)
 	)=>{
-		impl Fixed<$lhs,{$lhs*32}>
-		{
-			paste::item!{
+		paste::item!{
+			impl Fixed<$lhs,{$lhs*32}>
+			{
 				#[inline]
-				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
+				pub fn [<widen_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
 					Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
 				}
 			}
-		}
-		impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
-			fn fix(self)->Fixed<$rhs,{$rhs*32}>{
-				paste::item!{
-				self.[<fix_ $rhs>]()
-				}
-			}
-		}
-	}
-}
-macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
-	(
-		(),
-		($lhs:expr,$rhs:expr)
-	)=>{
-		impl Fixed<$lhs,{$lhs*32}>
-		{
-			paste::item!{
+			impl From<Fixed<$lhs,{$lhs*32}>> for Fixed<$rhs,{$rhs*32}>{
 				#[inline]
-				pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
-					self
-				}
-			}
-		}
-		impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
-			fn fix(self)->Fixed<$rhs,{$rhs*32}>{
-				paste::item!{
-				self.[<fix_ $rhs>]()
+				fn from(value:Fixed<$lhs,{$lhs*32}>)->Fixed<$rhs,{$rhs*32}>{
+					value.[<widen_ $rhs>]()
 				}
 			}
 		}
@@ -740,7 +760,7 @@ macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
 // I LOVE NOT BEING ABLE TO USE CONST GENERICS
 
 macro_repeated!(
-	impl_fix_rhs_lt_lhs_not_const_generic,(),
+	impl_narrow_not_const_generic,(),
 	(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
 	(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
 	(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
@@ -758,7 +778,7 @@ macro_repeated!(
 	(16,15)
 );
 macro_repeated!(
-	impl_fix_lhs_lt_rhs_not_const_generic,(),
+	impl_widen_not_const_generic,(),
 	(1,2),
 	(1,3),(2,3),
 	(1,4),(2,4),(3,4),
@@ -773,11 +793,8 @@ macro_repeated!(
 	(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
 	(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
 	(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
-	(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
-);
-macro_repeated!(
-	impl_fix_lhs_eq_rhs_not_const_generic,(),
-	(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
+	(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),
+	(1,17)
 );
 
 macro_rules! impl_not_const_generic{
@@ -797,7 +814,7 @@ macro_rules! impl_not_const_generic{
 				let mut result=Self::ZERO;
 
 				//resize self to match the wide mul output
-				let wide_self=self.[<fix_ $_2n>]();
+				let wide_self=self.[<widen_ $_2n>]();
 				//descend down the bits and check if flipping each bit would push the square over the input value
 				for shift in (0..=max_shift).rev(){
 					result.as_bits_mut().as_bits_mut().set_bit(shift,true);
diff --git a/lib/fixed_wide/src/tests.rs b/lib/fixed_wide/src/tests.rs
index b4f89bafa..1808c10bb 100644
--- a/lib/fixed_wide/src/tests.rs
+++ b/lib/fixed_wide/src/tests.rs
@@ -61,7 +61,7 @@ fn from_f32(){
 	let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into();
 	assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
 	//16 is within the 24 bits of float precision
-	let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.fix_2()).try_into();
+	let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.widen_2()).try_into();
 	assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
 	let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into();
 	assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow));
@@ -136,11 +136,24 @@ fn test_bint(){
 }
 
 #[test]
-fn test_fix(){
-	assert_eq!(I32F32::ONE.fix_8(),I256F256::ONE);
-	assert_eq!(I32F32::ONE,I256F256::ONE.fix_1());
-	assert_eq!(I32F32::NEG_ONE.fix_8(),I256F256::NEG_ONE);
-	assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.fix_1());
+fn test_wrap(){
+	assert_eq!(I32F32::ONE,I256F256::ONE.wrap_1());
+	assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.wrap_1());
+}
+#[test]
+fn test_narrow(){
+	assert_eq!(Ok(I32F32::ONE),I256F256::ONE.narrow_1());
+	assert_eq!(Ok(I32F32::NEG_ONE),I256F256::NEG_ONE.narrow_1());
+}
+#[test]
+fn test_widen(){
+	assert_eq!(I32F32::ONE.widen_8(),I256F256::ONE);
+	assert_eq!(I32F32::NEG_ONE.widen_8(),I256F256::NEG_ONE);
+}
+#[test]
+fn test_clamp(){
+	assert_eq!(I32F32::ONE,I256F256::ONE.clamp_1());
+	assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.clamp_1());
 }
 #[test]
 fn test_sqrt(){
diff --git a/lib/fixed_wide/src/zeroes.rs b/lib/fixed_wide/src/zeroes.rs
index 7f1dbc917..0bde9dd96 100644
--- a/lib/fixed_wide/src/zeroes.rs
+++ b/lib/fixed_wide/src/zeroes.rs
@@ -15,8 +15,10 @@ macro_rules! impl_zeroes{
 				let radicand=a1*a1-a2*a0*4;
 				match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
 					Ordering::Greater=>{
+						// using wrap because sqrt always halves the number of leading digits.
+						// clamp would be more defensive, but is slower.
 						paste::item!{
-						let planar_radicand=radicand.sqrt().[<fix_ $n>]();
+						let planar_radicand=radicand.sqrt().[<wrap_ $n>]();
 						}
 						//sort roots ascending and avoid taking the difference of large numbers
 						let zeroes=match (a2pos,Self::ZERO<a1){
diff --git a/lib/linear_ops/src/macros/fixed_wide.rs b/lib/linear_ops/src/macros/fixed_wide.rs
index 199e7fb3f..1434ea66a 100644
--- a/lib/linear_ops/src/macros/fixed_wide.rs
+++ b/lib/linear_ops/src/macros/fixed_wide.rs
@@ -38,40 +38,95 @@ macro_rules! impl_fixed_wide_vector {
 		$crate::macro_4!(impl_fixed_wide_vector_not_const_generic,());
 		// I LOVE NOT BEING ABLE TO USE CONST GENERICS
 		$crate::macro_repeated!(
-			impl_fix_not_const_generic,(),
-			(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
-			(1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
-			(1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
-			(1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
-			(1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
-			(1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
-			(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
-			(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
-			(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),(9,9),(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
-			(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),(10,10),(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
-			(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),(11,11),(12,11),(13,11),(14,11),(15,11),(16,11),
-			(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),(12,12),(13,12),(14,12),(15,12),(16,12),
-			(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),(13,13),(14,13),(15,13),(16,13),
-			(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),(14,14),(15,14),(16,14),
-			(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),(15,15),(16,15),
-			(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),(16,16)
+			impl_narrow_not_const_generic,(),
+			(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
+			(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
+			(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
+			(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
+			(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
+			(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
+			(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
+			(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
+			(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
+			(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
+			(12,11),(13,11),(14,11),(15,11),(16,11),
+			(13,12),(14,12),(15,12),(16,12),
+			(14,13),(15,13),(16,13),
+			(15,14),(16,14),
+			(16,15)
 		);
+		$crate::macro_repeated!(
+			impl_widen_not_const_generic,(),
+			(1,2),
+			(1,3),(2,3),
+			(1,4),(2,4),(3,4),
+			(1,5),(2,5),(3,5),(4,5),
+			(1,6),(2,6),(3,6),(4,6),(5,6),
+			(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
+			(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
+			(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
+			(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
+			(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
+			(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
+			(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
+			(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
+			(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
+			(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),
+			(1,17)
+		);
+		impl<const N:usize,T:fixed_wide::fixed::Wrap<U>,U> fixed_wide::fixed::Wrap<Vector<N,U>> for Vector<N,T>
+		{
+			#[inline]
+			fn wrap(self)->Vector<N,U>{
+				self.map(|t|t.wrap())
+			}
+		}
+		impl<const N:usize,T:fixed_wide::fixed::Clamp<U>,U> fixed_wide::fixed::Clamp<Vector<N,U>> for Vector<N,T>
+		{
+			#[inline]
+			fn clamp(self)->Vector<N,U>{
+				self.map(|t|t.clamp())
+			}
+		}
 	};
 }
-
 #[doc(hidden)]
 #[macro_export(local_inner_macros)]
-macro_rules! impl_fix_not_const_generic{
+macro_rules! impl_narrow_not_const_generic{
 	(
 		(),
 		($lhs:expr,$rhs:expr)
 	)=>{
-		impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>
-		{
-			paste::item!{
+		paste::item!{
+			impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
 				#[inline]
-				pub fn [<fix_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
-					self.map(|t|t.[<fix_ $rhs>]())
+				pub fn [<wrap_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
+					self.map(|t|t.[<wrap_ $rhs>]())
+				}
+				#[inline]
+				pub fn [<narrow_ $rhs>](self)->Vector<N,Result<fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>,fixed_wide::fixed::NarrowError>>{
+					self.map(|t|t.[<narrow_ $rhs>]())
+				}
+				#[inline]
+				pub fn [<clamp_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
+					self.map(|t|t.[<clamp_ $rhs>]())
+				}
+			}
+		}
+	}
+}
+#[doc(hidden)]
+#[macro_export(local_inner_macros)]
+macro_rules! impl_widen_not_const_generic{
+	(
+		(),
+		($lhs:expr,$rhs:expr)
+	)=>{
+		paste::item!{
+			impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
+				#[inline]
+				pub fn [<widen_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
+					self.map(|t|t.[<widen_ $rhs>]())
 				}
 			}
 		}