Compare commits

..

5 Commits

Author SHA1 Message Date
6d382e8165 sad api 2024-09-18 18:48:47 -07:00
08c8b59bd9 run_scripts is now a member function 2024-09-18 15:34:56 -07:00
74b7265527 no 2024-09-17 17:59:34 -07:00
e68d15f4e3 run scripts 2024-09-17 17:44:18 -07:00
8d9b1eeb04 update specific deps 2024-09-17 17:44:18 -07:00
166 changed files with 1308 additions and 16316 deletions


@@ -1,6 +1,2 @@
[registries.strafesnet]
index = "sparse+https://git.itzana.me/api/packages/strafesnet/cargo/"
[target.x86_64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=/usr/bin/mold"]
index = "sparse+https://git.itzana.me/api/packages/strafesnet/cargo/"

1
CONTRIBUTING.md Normal file

@@ -0,0 +1 @@
By contributing code to the [StrafesNET project](https://git.itzana.me/StrafesNET/strafe-client), you agree to license your contribution under the [License](LICENSE).

1222
Cargo.lock generated

File diff suppressed because it is too large


@@ -1,17 +1,34 @@
[workspace]
members = [
"lib/bsp_loader",
"lib/common",
"lib/deferred_loader",
"lib/fixed_wide",
"lib/linear_ops",
"lib/ratio_ops",
"lib/rbx_loader",
"lib/roblox_emulator",
"lib/snf",
"strafe-client",
]
resolver = "2"
[package]
name = "strafe-client"
version = "0.10.3"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-client"
license = "Custom"
description = "StrafesNET game client for bhop and surf."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["snf"]
snf = ["dep:strafesnet_snf"]
source = ["dep:strafesnet_deferred_loader", "dep:strafesnet_bsp_loader"]
roblox = ["dep:strafesnet_deferred_loader", "dep:strafesnet_rbx_loader"]
[dependencies]
bytemuck = { version = "1.13.1", features = ["derive"] }
configparser = "3.0.2"
ddsfile = "0.5.1"
glam = "0.28.0"
id = { version = "0.1.0", registry = "strafesnet" }
parking_lot = "0.12.1"
pollster = "0.3.0"
strafesnet_bsp_loader = { version = "0.1.3", registry = "strafesnet", optional = true }
strafesnet_common = { version = "0.4.0", registry = "strafesnet" }
strafesnet_deferred_loader = { version = "0.3.1", features = ["legacy"], registry = "strafesnet", optional = true }
strafesnet_rbx_loader = { version = "0.3.2", registry = "strafesnet", optional = true }
strafesnet_snf = { version = "0.1.2", registry = "strafesnet", optional = true }
wgpu = "22.0.0"
winit = "0.30.4"
[profile.release]
#lto = true
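
The optional map loaders above are cargo features rather than unconditional dependencies: `snf` is enabled by default, while `source` and `roblox` each pull in strafesnet_deferred_loader together with the matching format crate. A Source-map-capable build would therefore be started with something like `cargo run --release --features source` (command assumed from the feature names shown here, not taken from the diff).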


@@ -1,13 +1,10 @@
<img align="right" width="25%" src="logo.png">
<img align="right" width="25%" src="strafe.png">
# Strafe Project
Monorepo for working on projects related to strafe client.
# Strafe Client
In development client for jumping on squares (and riding on triangles)
## How to build and run
1. Have rust and git installed
2. `git clone https://git.itzana.me/StrafesNET/strafe-project`
3. `cd strafe-project`
4. `cargo run --release --bin strafe-client`
## Licenses
Each project has its own license. Most crates are MIT/Apache but notably the Strafe Client has a sole proprietor license.
2. `git clone https://git.itzana.me/StrafesNET/strafe-client`
3. `cd strafe-client`
4. `cargo run --release`


@@ -1,19 +0,0 @@
Vectors: Fixed Size, Fixed Point, Wide
======================================
## These exist separately in the Rust ecosystem, but not together.
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>


@@ -1 +0,0 @@
/target


@@ -1,16 +0,0 @@
[package]
name = "strafesnet_bsp_loader"
version = "0.2.2"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Convert Valve BSP files to StrafesNET data structures."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
glam = "0.29.0"
strafesnet_common = { path = "../common", registry = "strafesnet" }
vbsp = "0.6.0"
vmdl = "0.2.0"


@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.


@@ -1,19 +0,0 @@
StrafesNET BSP Loader
=====================
## Convert Valve BSP files into StrafesNET data structures
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>


@@ -1,333 +0,0 @@
use strafesnet_common::{map,model,integer,gameplay_attributes};
const VALVE_SCALE:f32=1.0/16.0;
fn valve_transform([x,y,z]:[f32;3])->integer::Planar64Vec3{
integer::vec3::try_from_f32_array([x*VALVE_SCALE,z*VALVE_SCALE,-y*VALVE_SCALE]).unwrap()
}
pub fn convert_bsp<AcquireRenderConfigId,AcquireMeshId>(
bsp:&vbsp::Bsp,
mut acquire_render_config_id:AcquireRenderConfigId,
mut acquire_mesh_id:AcquireMeshId
)->PartialMap1
where
AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
AcquireMeshId:FnMut(&str)->model::MeshId,
{
//figure out real attributes later
let mut unique_attributes=Vec::new();
unique_attributes.push(gameplay_attributes::CollisionAttributes::Decoration);
const TEMP_TOUCH_ME_ATTRIBUTE:gameplay_attributes::CollisionAttributesId=gameplay_attributes::CollisionAttributesId::new(0);
let mut prop_mesh_count=0;
//declare all prop models to Loader
let prop_models=bsp.static_props().map(|prop|{
//get or create mesh_id
let mesh_id=acquire_mesh_id(prop.model());
//not the most failsafe code but this is just for the map tool lmao
if prop_mesh_count==mesh_id.get(){
prop_mesh_count+=1;
};
let placement=prop.as_prop_placement();
model::Model{
mesh:mesh_id,
attributes:TEMP_TOUCH_ME_ATTRIBUTE,
transform:integer::Planar64Affine3::new(
integer::mat3::try_from_f32_array_2d((
glam::Mat3A::from_diagonal(glam::Vec3::splat(placement.scale))
//TODO: figure this out
*glam::Mat3A::from_quat(glam::Quat::from_array(placement.rotation.into()))
).to_cols_array_2d()).unwrap(),
valve_transform(placement.origin.into()),
),
color:glam::Vec4::ONE,
}
}).collect();
//TODO: make the main map one single mesh with a bunch of different physics groups and graphics groups
//the generated MeshIds in here will collide with the Loader Mesh Ids
//but I can't think of a good workaround other than just remapping one later.
let world_meshes:Vec<model::Mesh>=bsp.models().map(|world_model|{
//non-deduplicated
let mut spam_pos=Vec::new();
let mut spam_tex=Vec::new();
let mut spam_normal=Vec::new();
let mut spam_vertices=Vec::new();
let mut graphics_groups=Vec::new();
let mut physics_group=model::IndexedPhysicsGroup::default();
let polygon_groups=world_model.faces().enumerate().map(|(polygon_group_id,face)|{
let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32);
let face_texture=face.texture();
let face_texture_data=face_texture.texture_data();
//this would be better as a 4x2 matrix
let texture_transform_u=glam::Vec4::from_array(face_texture.texture_transforms_u)/(face_texture_data.width as f32);
let texture_transform_v=glam::Vec4::from_array(face_texture.texture_transforms_v)/(face_texture_data.height as f32);
//this automatically figures out what the texture is trying to do and creates
//a render config for it, and then returns the id to that render config
let render_id=acquire_render_config_id(Some(face_texture_data.name()));
//normal
let normal=face.normal();
let normal_idx=spam_normal.len() as u32;
spam_normal.push(valve_transform(normal.into()));
let mut polygon_iter=face.vertex_positions().map(|vertex_position|{
//world_model.origin seems to always be 0,0,0
let vertex_xyz=(world_model.origin+vertex_position).into();
let pos_idx=spam_pos.len();
spam_pos.push(valve_transform(vertex_xyz));
//calculate texture coordinates
let pos=glam::Vec3::from_array(vertex_xyz).extend(1.0);
let tex=glam::vec2(texture_transform_u.dot(pos),texture_transform_v.dot(pos));
let tex_idx=spam_tex.len() as u32;
spam_tex.push(tex);
let vertex_id=model::VertexId::new(spam_vertices.len() as u32);
spam_vertices.push(model::IndexedVertex{
pos:model::PositionId::new(pos_idx as u32),
tex:model::TextureCoordinateId::new(tex_idx as u32),
normal:model::NormalId::new(normal_idx),
color:model::ColorId::new(0),
});
vertex_id
});
let polygon_list=std::iter::from_fn(move||{
match (polygon_iter.next(),polygon_iter.next(),polygon_iter.next()){
(Some(v1),Some(v2),Some(v3))=>Some(vec![v1,v2,v3]),
//ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate
_=>None,
}
}).collect();
if face.is_visible(){
//TODO: deduplicate graphics groups by render id
graphics_groups.push(model::IndexedGraphicsGroup{
render:render_id,
groups:vec![polygon_group_id],
})
}
physics_group.groups.push(polygon_group_id);
model::PolygonGroup::PolygonList(model::PolygonList::new(polygon_list))
}).collect();
model::Mesh{
unique_pos:spam_pos,
unique_tex:spam_tex,
unique_normal:spam_normal,
unique_color:vec![glam::Vec4::ONE],
unique_vertices:spam_vertices,
polygon_groups,
graphics_groups,
physics_groups:vec![physics_group],
}
}).collect();
let world_models:Vec<model::Model>=
//one instance of the main world mesh
std::iter::once((
//world_model
model::MeshId::new(0),
//model_origin
vbsp::Vector::from([0.0,0.0,0.0]),
//model_color
vbsp::Color{r:255,g:255,b:255},
)).chain(
//entities sprinkle instances of the other meshes around
bsp.entities.iter()
.flat_map(|ent|ent.parse())//ignore entity parsing errors
.filter_map(|ent|match ent{
vbsp::Entity::Brush(brush)=>Some(brush),
vbsp::Entity::BrushIllusionary(brush)=>Some(brush),
vbsp::Entity::BrushWall(brush)=>Some(brush),
vbsp::Entity::BrushWallToggle(brush)=>Some(brush),
_=>None,
}).flat_map(|brush|
//The first character of brush.model is '*'
brush.model[1..].parse().map(|mesh_id|//ignore parse int errors
(model::MeshId::new(mesh_id),brush.origin,brush.color)
)
)
).map(|(mesh_id,model_origin,vbsp::Color{r,g,b})|{
model::Model{
mesh:mesh_id,
attributes:TEMP_TOUCH_ME_ATTRIBUTE,
transform:integer::Planar64Affine3::new(
integer::mat3::identity(),
valve_transform(model_origin.into())
),
color:(glam::Vec3::from_array([r as f32,g as f32,b as f32])/255.0).extend(1.0),
}
}).collect();
PartialMap1{
attributes:unique_attributes,
world_meshes,
prop_models,
world_models,
modes:strafesnet_common::gameplay_modes::Modes::new(Vec::new()),
}
}
//partially constructed map types
pub struct PartialMap1{
attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
prop_models:Vec<model::Model>,
world_meshes:Vec<model::Mesh>,
world_models:Vec<model::Model>,
modes:strafesnet_common::gameplay_modes::Modes,
}
impl PartialMap1{
pub fn add_prop_meshes<AcquireRenderConfigId>(
self,
prop_meshes:impl IntoIterator<Item=(model::MeshId,crate::data::ModelData)>,
mut acquire_render_config_id:AcquireRenderConfigId,
)->PartialMap2
where
AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
{
PartialMap2{
attributes:self.attributes,
prop_meshes:prop_meshes.into_iter().filter_map(|(mesh_id,model_data)|
//this will generate new render ids and texture ids
match convert_mesh(model_data,&mut acquire_render_config_id){
Ok(mesh)=>Some((mesh_id,mesh)),
Err(e)=>{
println!("error converting mesh: {e}");
None
}
}
).collect(),
prop_models:self.prop_models,
world_meshes:self.world_meshes,
world_models:self.world_models,
modes:self.modes,
}
}
}
pub struct PartialMap2{
attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
prop_meshes:Vec<(model::MeshId,model::Mesh)>,
prop_models:Vec<model::Model>,
world_meshes:Vec<model::Mesh>,
world_models:Vec<model::Model>,
modes:strafesnet_common::gameplay_modes::Modes,
}
impl PartialMap2{
pub fn add_render_configs_and_textures(
mut self,
render_configs:impl IntoIterator<Item=(model::RenderConfigId,model::RenderConfig)>,
textures:impl IntoIterator<Item=(model::TextureId,Vec<u8>)>,
)->map::CompleteMap{
//merge mesh and model lists, flatten and remap all ids
let mesh_id_offset=self.world_meshes.len();
println!("prop_meshes.len()={}",self.prop_meshes.len());
let (mut prop_meshes,prop_mesh_id_map):(Vec<model::Mesh>,std::collections::HashMap<model::MeshId,model::MeshId>)
=self.prop_meshes.into_iter().enumerate().map(|(new_mesh_id,(old_mesh_id,mesh))|{
(mesh,(old_mesh_id,model::MeshId::new((mesh_id_offset+new_mesh_id) as u32)))
}).unzip();
self.world_meshes.append(&mut prop_meshes);
//there are currently no modes or runtime behaviours that reference the model ids,
//so just relentlessly cull the models whose mesh is missing
self.world_models.extend(self.prop_models.into_iter().filter_map(|mut model|
prop_mesh_id_map.get(&model.mesh).map(|&new_mesh_id|{
model.mesh=new_mesh_id;
model
})
));
//let mut models=Vec::new();
let (textures,texture_id_map):(Vec<Vec<u8>>,std::collections::HashMap<model::TextureId,model::TextureId>)
=textures.into_iter()
//.filter_map(f) cull unused textures
.enumerate().map(|(new_texture_id,(old_texture_id,texture))|{
(texture,(old_texture_id,model::TextureId::new(new_texture_id as u32)))
}).unzip();
let render_configs=render_configs.into_iter().map(|(render_config_id,mut render_config)|{
//this may generate duplicate no-texture render configs but idc
render_config.texture=render_config.texture.and_then(|texture_id|
texture_id_map.get(&texture_id).copied()
);
render_config
}).collect();
map::CompleteMap{
modes:self.modes,
attributes:self.attributes,
meshes:self.world_meshes,
models:self.world_models,
textures,
render_configs,
}
}
}
fn convert_mesh<AcquireRenderConfigId>(
model_data:crate::data::ModelData,
acquire_render_config_id:&mut AcquireRenderConfigId,
)->Result<model::Mesh,vmdl::ModelError>
where
AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
{
let model=model_data.read_model()?;
let texture_paths=model.texture_directories();
if texture_paths.len()!=1{
println!("WARNING: multiple texture paths");
}
let skin=model.skin_tables().nth(0).unwrap();
let mut spam_pos=Vec::with_capacity(model.vertices().len());
let mut spam_normal=Vec::with_capacity(model.vertices().len());
let mut spam_tex=Vec::with_capacity(model.vertices().len());
let mut spam_vertices=Vec::with_capacity(model.vertices().len());
for (i,vertex) in model.vertices().iter().enumerate(){
spam_pos.push(valve_transform(vertex.position.into()));
spam_normal.push(valve_transform(vertex.normal.into()));
spam_tex.push(glam::Vec2::from_array(vertex.texture_coordinates));
spam_vertices.push(model::IndexedVertex{
pos:model::PositionId::new(i as u32),
tex:model::TextureCoordinateId::new(i as u32),
normal:model::NormalId::new(i as u32),
color:model::ColorId::new(0),
});
}
let mut graphics_groups=Vec::new();
let mut physics_groups=Vec::new();
let polygon_groups=model.meshes().enumerate().map(|(polygon_group_id,mesh)|{
let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32);
let render_id=if let (Some(texture_path),Some(texture_name))=(texture_paths.get(0),skin.texture(mesh.material_index())){
let mut path=std::path::PathBuf::from(texture_path.as_str());
path.push(texture_name);
acquire_render_config_id(path.as_os_str().to_str())
}else{
acquire_render_config_id(None)
};
graphics_groups.push(model::IndexedGraphicsGroup{
render:render_id,
groups:vec![polygon_group_id],
});
physics_groups.push(model::IndexedPhysicsGroup{
groups:vec![polygon_group_id],
});
model::PolygonGroup::PolygonList(model::PolygonList::new(
//looking at the code, it would seem that the strips are pre-deindexed into triangle lists when calling this function
mesh.vertex_strip_indices().flat_map(|mut strip|
std::iter::from_fn(move||{
match (strip.next(),strip.next(),strip.next()){
(Some(v1),Some(v2),Some(v3))=>Some([v1,v2,v3].map(|vertex_id|model::VertexId::new(vertex_id as u32)).to_vec()),
//ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate
_=>None,
}
})
).collect()
))
}).collect();
Ok(model::Mesh{
unique_pos:spam_pos,
unique_normal:spam_normal,
unique_tex:spam_tex,
unique_color:vec![glam::Vec4::ONE],
unique_vertices:spam_vertices,
polygon_groups,
graphics_groups,
physics_groups,
})
}


@@ -1,60 +0,0 @@
pub struct Bsp(vbsp::Bsp);
impl Bsp{
pub const fn new(value:vbsp::Bsp)->Self{
Self(value)
}
}
impl AsRef<vbsp::Bsp> for Bsp{
fn as_ref(&self)->&vbsp::Bsp{
&self.0
}
}
pub struct MdlData(Vec<u8>);
impl MdlData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for MdlData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct VtxData(Vec<u8>);
impl VtxData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for VtxData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct VvdData(Vec<u8>);
impl VvdData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for VvdData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct ModelData{
pub mdl:MdlData,
pub vtx:VtxData,
pub vvd:VvdData,
}
impl ModelData{
pub fn read_model(&self)->Result<vmdl::Model,vmdl::ModelError>{
Ok(vmdl::Model::from_parts(
vmdl::mdl::Mdl::read(self.mdl.as_ref())?,
vmdl::vtx::Vtx::read(self.vtx.as_ref())?,
vmdl::vvd::Vvd::read(self.vvd.as_ref())?,
))
}
}
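
A minimal sketch of how these wrappers compose (the function itself and how the bytes are obtained are assumptions; MdlData, VtxData, VvdData, ModelData and read_model are from the code above): wrap the three raw Source-engine prop files and parse them with vmdl.

use strafesnet_bsp_loader::data::{MdlData,ModelData,VtxData,VvdData};
//however the caller obtained the raw .mdl/.vtx/.vvd bytes, read_model stitches them into one vmdl::Model
fn parse_prop(mdl:Vec<u8>,vtx:Vec<u8>,vvd:Vec<u8>)->Result<vmdl::Model,vmdl::ModelError>{
	let model_data=ModelData{
		mdl:MdlData::new(mdl),
		vtx:VtxData::new(vtx),
		vvd:VvdData::new(vvd),
	};
	model_data.read_model()
}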


@@ -1,37 +0,0 @@
mod bsp;
pub mod data;
pub use data::Bsp;
#[derive(Debug)]
pub enum ReadError{
Bsp(vbsp::BspError),
Io(std::io::Error),
}
impl std::fmt::Display for ReadError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for ReadError{}
pub fn read<R:std::io::Read>(mut input:R)->Result<Bsp,ReadError>{
let mut s=Vec::new();
//TODO: mmap
input.read_to_end(&mut s).map_err(ReadError::Io)?;
vbsp::Bsp::read(s.as_slice()).map(Bsp::new).map_err(ReadError::Bsp)
}
pub fn convert<AcquireRenderConfigId,AcquireMeshId>(
bsp:&Bsp,
acquire_render_config_id:AcquireRenderConfigId,
acquire_mesh_id:AcquireMeshId
)->bsp::PartialMap1
where
AcquireRenderConfigId:FnMut(Option<&str>)->strafesnet_common::model::RenderConfigId,
AcquireMeshId:FnMut(&str)->strafesnet_common::model::MeshId,
{
bsp::convert_bsp(bsp.as_ref(),acquire_render_config_id,acquire_mesh_id)
}
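
For context, a minimal end-to-end sketch of this public API (the id-allocating closures and their counters are assumptions; read, convert and PartialMap1 are defined above): read a .bsp from any std::io::Read source and kick off the staged conversion.

use strafesnet_common::model;

fn convert_example(file:std::fs::File)->Result<(),strafesnet_bsp_loader::ReadError>{
	let bsp=strafesnet_bsp_loader::read(file)?;
	//hand out sequential ids; a real loader would deduplicate by texture/model name
	//(::new on the id types is assumed from the id derive, as with MeshId::new in bsp.rs)
	let mut next_render_config=0u32;
	let mut next_mesh=0u32;
	let partial_map=strafesnet_bsp_loader::convert(
		&bsp,
		|_texture_name|{let id=model::RenderConfigId::new(next_render_config);next_render_config+=1;id},
		|_prop_model_path|{let id=model::MeshId::new(next_mesh);next_mesh+=1;id},
	);
	//the pipeline continues with add_prop_meshes and add_render_configs_and_textures
	//to produce a map::CompleteMap
	let _=partial_map;
	Ok(())
}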


@@ -1 +0,0 @@
/target


@@ -1,19 +0,0 @@
[package]
name = "strafesnet_common"
version = "0.5.2"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Common types and helpers for Strafe Client associated projects."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
arrayvec = "0.7.4"
bitflags = "2.6.0"
fixed_wide = { path = "../fixed_wide", registry = "strafesnet", features = ["deferred-division","zeroes","wide-mul"] }
linear_ops = { path = "../linear_ops", registry = "strafesnet", features = ["deferred-division","named-fields"] }
ratio_ops = { path = "../ratio_ops", registry = "strafesnet" }
glam = "0.29.0"
id = { version = "0.1.0", registry = "strafesnet" }


@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.


@@ -1,19 +0,0 @@
StrafesNET Common Library
=========================
## Common types used in the StrafesNET ecosystem
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>


@@ -1,56 +0,0 @@
use crate::integer::{vec3,Planar64Vec3};
#[derive(Clone)]
pub struct Aabb{
min:Planar64Vec3,
max:Planar64Vec3,
}
impl Default for Aabb{
fn default()->Self{
Self{min:vec3::MAX,max:vec3::MIN}
}
}
impl Aabb{
pub const fn new(min:Planar64Vec3,max:Planar64Vec3)->Self{
Self{min,max}
}
pub const fn max(&self)->Planar64Vec3{
self.max
}
pub const fn min(&self)->Planar64Vec3{
self.min
}
pub fn grow(&mut self,point:Planar64Vec3){
self.min=self.min.min(point);
self.max=self.max.max(point);
}
pub fn join(&mut self,aabb:&Aabb){
self.min=self.min.min(aabb.min);
self.max=self.max.max(aabb.max);
}
pub fn inflate(&mut self,hs:Planar64Vec3){
self.min-=hs;
self.max+=hs;
}
pub fn intersects(&self,aabb:&Aabb)->bool{
let bvec=self.min.lt(aabb.max)&aabb.min.lt(self.max);
bvec.all()
}
pub fn size(&self)->Planar64Vec3{
self.max-self.min
}
pub fn center(&self)->Planar64Vec3{
self.min+((self.max-self.min)>>1)
}
//probably use floats for area & volume because we don't care about precision
// pub fn area_weight(&self)->f32{
// let d=self.max-self.min;
// d.x*d.y+d.y*d.z+d.z*d.x
// }
// pub fn volume(&self)->f32{
// let d=self.max-self.min;
// d.x*d.y*d.z
// }
}


@@ -1,194 +0,0 @@
use crate::aabb::Aabb;
//the algorithm:
//list the boxen (bounding boxes)
//sort by {minx,maxx,miny,maxy,minz,maxz} (6 lists)
//find the sets that minimizes the sum of surface areas
//splitting is done when the minimum split sum of surface areas is larger than the node's own surface area
//start with bisection into octrees because a bad bvh is still 1000x better than no bvh
//sort the centerpoints on each axis (3 lists)
//bv is put into octant based on whether it is upper or lower in each list
pub enum RecursiveContent<R,T>{
Branch(Vec<R>),
Leaf(T),
}
impl<R,T> Default for RecursiveContent<R,T>{
fn default()->Self{
Self::Branch(Vec::new())
}
}
pub struct BvhNode<T>{
content:RecursiveContent<BvhNode<T>,T>,
aabb:Aabb,
}
impl<T> Default for BvhNode<T>{
fn default()->Self{
Self{
content:Default::default(),
aabb:Aabb::default(),
}
}
}
pub struct BvhWeightNode<W,T>{
content:RecursiveContent<BvhWeightNode<W,T>,T>,
weight:W,
aabb:Aabb,
}
impl<T> BvhNode<T>{
pub fn the_tester<F:FnMut(&T)>(&self,aabb:&Aabb,f:&mut F){
match &self.content{
RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{
//this test could be moved outside the match statement
//but that would test the root node aabb
//you're probably not going to spend a lot of time outside the map,
//so the test is extra work for nothing
if aabb.intersects(&child.aabb){
child.the_tester(aabb,f);
}
},
}
}
pub fn into_visitor<F:FnMut(T)>(self,f:&mut F){
match self.content{
RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{
child.into_visitor(f)
},
}
}
pub fn weigh_contents<W:Copy+std::iter::Sum<W>,F:Fn(&T)->W>(self,f:&F)->BvhWeightNode<W,T>{
match self.content{
RecursiveContent::Leaf(model)=>BvhWeightNode{
weight:f(&model),
content:RecursiveContent::Leaf(model),
aabb:self.aabb,
},
RecursiveContent::Branch(children)=>{
let branch:Vec<BvhWeightNode<W,T>>=children.into_iter().map(|child|
child.weigh_contents(f)
).collect();
BvhWeightNode{
weight:branch.iter().map(|node|node.weight).sum(),
content:RecursiveContent::Branch(branch),
aabb:self.aabb,
}
},
}
}
}
impl <W,T> BvhWeightNode<W,T>{
pub const fn weight(&self)->&W{
&self.weight
}
pub const fn aabb(&self)->&Aabb{
&self.aabb
}
pub fn into_content(self)->RecursiveContent<BvhWeightNode<W,T>,T>{
self.content
}
pub fn into_visitor<F:FnMut(T)>(self,f:&mut F){
match self.content{
RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{
child.into_visitor(f)
},
}
}
}
pub fn generate_bvh<T>(boxen:Vec<(T,Aabb)>)->BvhNode<T>{
generate_bvh_node(boxen,false)
}
fn generate_bvh_node<T>(boxen:Vec<(T,Aabb)>,force:bool)->BvhNode<T>{
let n=boxen.len();
if force||n<20{
let mut aabb=Aabb::default();
let nodes=boxen.into_iter().map(|b|{
aabb.join(&b.1);
BvhNode{
content:RecursiveContent::Leaf(b.0),
aabb:b.1,
}
}).collect();
BvhNode{
content:RecursiveContent::Branch(nodes),
aabb,
}
}else{
let mut sort_x=Vec::with_capacity(n);
let mut sort_y=Vec::with_capacity(n);
let mut sort_z=Vec::with_capacity(n);
for (i,(_,aabb)) in boxen.iter().enumerate(){
let center=aabb.center();
sort_x.push((i,center.x));
sort_y.push((i,center.y));
sort_z.push((i,center.z));
}
sort_x.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
sort_y.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
sort_z.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
let h=n/2;
let median_x=sort_x[h].1;
let median_y=sort_y[h].1;
let median_z=sort_z[h].1;
//locate a run of values equal to the median
//partition point gives the first index for which the predicate evaluates to false
let first_index_eq_median_x=sort_x.partition_point(|&(_,x)|x<median_x);
let first_index_eq_median_y=sort_y.partition_point(|&(_,y)|y<median_y);
let first_index_eq_median_z=sort_z.partition_point(|&(_,z)|z<median_z);
let first_index_gt_median_x=sort_x.partition_point(|&(_,x)|x<=median_x);
let first_index_gt_median_y=sort_y.partition_point(|&(_,y)|y<=median_y);
let first_index_gt_median_z=sort_z.partition_point(|&(_,z)|z<=median_z);
//pick which side median value copies go into such that both sides are as balanced as possible based on distance from n/2
let partition_point_x=if n.abs_diff(2*first_index_eq_median_x)<n.abs_diff(2*first_index_gt_median_x){first_index_eq_median_x}else{first_index_gt_median_x};
let partition_point_y=if n.abs_diff(2*first_index_eq_median_y)<n.abs_diff(2*first_index_gt_median_y){first_index_eq_median_y}else{first_index_gt_median_y};
let partition_point_z=if n.abs_diff(2*first_index_eq_median_z)<n.abs_diff(2*first_index_gt_median_z){first_index_eq_median_z}else{first_index_gt_median_z};
//this identifies which octant each box is put in
let mut octant=vec![0;n];
for &(i,_) in &sort_x[partition_point_x..]{
octant[i]+=1<<0;
}
for &(i,_) in &sort_y[partition_point_y..]{
octant[i]+=1<<1;
}
for &(i,_) in &sort_z[partition_point_z..]{
octant[i]+=1<<2;
}
//generate lists for unique octant values
let mut list_list=Vec::with_capacity(8);
let mut octant_list=Vec::with_capacity(8);
for (i,(data,aabb)) in boxen.into_iter().enumerate(){
let octant_id=octant[i];
let list_id=if let Some(list_id)=octant_list.iter().position(|&id|id==octant_id){
list_id
}else{
let list_id=list_list.len();
octant_list.push(octant_id);
list_list.push(Vec::new());
list_id
};
list_list[list_id].push((data,aabb));
}
let mut aabb=Aabb::default();
if list_list.len()==1{
generate_bvh_node(list_list.remove(0),true)
}else{
BvhNode{
content:RecursiveContent::Branch(
list_list.into_iter().map(|b|{
let node=generate_bvh_node(b,false);
aabb.join(&node.aabb);
node
}).collect()
),
aabb,
}
}
}
}
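
A small usage sketch of the tree above (the module paths and the f32 conversions are assumptions; generate_bvh, Aabb and the_tester come from this diff): build a BVH over three unit boxes and visit every box a query volume might touch.

use strafesnet_common::aabb::Aabb;//module paths assumed
use strafesnet_common::bvh::generate_bvh;
use strafesnet_common::integer::vec3;

fn broadphase_example(){
	//three unit boxes along +x, tagged with their index
	let boxen:Vec<(usize,Aabb)>=(0..3).map(|i|{
		let min=vec3::try_from_f32_array([i as f32,0.0,0.0]).unwrap();
		let max=vec3::try_from_f32_array([i as f32+1.0,1.0,1.0]).unwrap();
		(i,Aabb::new(min,max))
	}).collect();
	let bvh=generate_bvh(boxen);
	//a query volume overlapping the first two boxes
	let query=Aabb::new(
		vec3::try_from_f32_array([0.5,0.0,0.0]).unwrap(),
		vec3::try_from_f32_array([1.5,1.0,1.0]).unwrap(),
	);
	bvh.the_tester(&query,&mut |&index|println!("possible hit: box {index}"));
}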


@@ -1,25 +0,0 @@
bitflags::bitflags!{
#[derive(Clone,Copy,Debug,Default)]
pub struct Controls:u32{
const MoveForward=1<<0;
const MoveLeft=1<<1;
const MoveBackward=1<<2;
const MoveRight=1<<3;
const MoveUp=1<<4;
const MoveDown=1<<5;
const LookUp=1<<6;
const LookLeft=1<<7;
const LookDown=1<<8;
const LookRight=1<<9;
const Jump=1<<10;
const Crouch=1<<11;
const Sprint=1<<12;
const Zoom=1<<13;
const Use=1<<14;//Interact with object
const PrimaryAction=1<<15;//LBM/Shoot/Melee
const SecondaryAction=1<<16;//RMB/ADS/Block
const WASD=Self::MoveForward.union(Self::MoveLeft).union(Self::MoveBackward).union(Self::MoveRight).bits();
const WASDQE=Self::MoveForward.union(Self::MoveLeft).union(Self::MoveBackward).union(Self::MoveRight).union(Self::MoveUp).union(Self::MoveDown).bits();
}
}
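
A brief sketch of consuming these flags (the module path is assumed; intersects/contains are standard bitflags 2.x calls, and WASD is the union constant defined above): group-test movement input.

use strafesnet_common::controls::Controls;//module path assumed

fn wants_horizontal_move(controls:Controls)->bool{
	//WASD is the union constant defined above
	controls.intersects(Controls::WASD)
}
fn is_ads_while_sprinting(controls:Controls)->bool{
	controls.contains(Controls::Sprint|Controls::SecondaryAction)
}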


@@ -1,174 +0,0 @@
use crate::model;
use crate::integer::{AbsoluteTime,Planar64,Planar64Vec3};
//you have this effect while in contact
#[derive(Clone,Hash,Eq,PartialEq)]
pub struct ContactingLadder{
pub sticky:bool
}
#[derive(Clone,Hash,Eq,PartialEq)]
pub enum ContactingBehaviour{
Surf,
Ladder(ContactingLadder),
NoJump,
Cling,//usable as a zipline, or other weird and wonderful things
Elastic(u32),//[1/2^32,1] 0=None (elasticity+1)/2^32
}
//you have this effect while intersecting
#[derive(Clone,Hash,Eq,PartialEq)]
pub struct IntersectingWater{
pub viscosity:Planar64,
pub density:Planar64,
pub velocity:Planar64Vec3,
}
//All models can be given these attributes
#[derive(Clone,Hash,Eq,PartialEq)]
pub struct Accelerator{
pub acceleration:Planar64Vec3
}
#[derive(Clone,Hash,Eq,PartialEq)]
pub enum Booster{
//Affine(crate::integer::Planar64Affine3),//capable of SetVelocity,DotVelocity,normal booster,bouncy part,redirect velocity, and much more
Velocity(Planar64Vec3),//straight up boost velocity adds to your current velocity
Energy{direction:Planar64Vec3,energy:Planar64},//increase energy in direction
AirTime(AbsoluteTime),//increase airtime, invariant across mass and gravity changes
Height(Planar64),//increase height, invariant across mass and gravity changes
}
impl Booster{
pub fn boost(&self,velocity:Planar64Vec3)->Planar64Vec3{
match self{
&Booster::Velocity(boost_velocity)=>velocity+boost_velocity,
&Booster::Energy{..}=>{
todo!()
//let d=direction.dot(velocity);
//TODO: think about negative
//velocity+direction.with_length((d*d+energy).sqrt()-d)
},
Booster::AirTime(_)=>todo!(),
Booster::Height(_)=>todo!(),
}
}
}
#[derive(Clone,Hash,Eq,PartialEq)]
pub enum TrajectoryChoice{
HighArcLongDuration,//underhand lob at target: less horizontal speed and more air time
LowArcShortDuration,//overhand throw at target: more horizontal speed and less air time
}
#[derive(Clone,Hash,Eq,PartialEq)]
pub enum SetTrajectory{
//Speed-type SetTrajectory
AirTime(AbsoluteTime),//air time (relative to gravity direction) is invariant across mass and gravity changes
Height(Planar64),//boost height (relative to gravity direction) is invariant across mass and gravity changes
DotVelocity{direction:Planar64Vec3,dot:Planar64},//set your velocity in a specific direction without touching other directions
//Velocity-type SetTrajectory
TargetPointTime{//launch on a trajectory that will land at a target point in a set amount of time
target_point:Planar64Vec3,
time:AbsoluteTime,//short time = fast and direct, long time = launch high in the air, negative time = wrong way
},
TargetPointSpeed{//launch at a fixed speed and land at a target point
target_point:Planar64Vec3,
speed:Planar64,//if speed is too low this will fail to reach the target. The closest-passing trajectory will be chosen instead
trajectory_choice:TrajectoryChoice,
},
Velocity(Planar64Vec3),//SetVelocity
}
impl SetTrajectory{
pub const fn is_absolute(&self)->bool{
match self{
SetTrajectory::AirTime(_)
|SetTrajectory::Height(_)
|SetTrajectory::DotVelocity{direction:_,dot:_}=>false,
SetTrajectory::TargetPointTime{target_point:_,time:_}
|SetTrajectory::TargetPointSpeed{target_point:_,speed:_,trajectory_choice:_}
|SetTrajectory::Velocity(_)=>true,
}
}
}
// enum TrapCondition{
// FasterThan(Planar64),
// SlowerThan(Planar64),
// InRange(Planar64,Planar64),
// OutsideRange(Planar64,Planar64),
// }
#[derive(Clone,Hash,Eq,PartialEq)]
pub struct Wormhole{
//destination does not need to be another wormhole
//this defines a one way portal to a destination model transform
//two of these can create a two way wormhole
pub destination_model:model::ModelId,
//(position,angles)*=origin.transform.inverse()*destination.transform
}
//attributes listed in order of handling
#[derive(Default,Clone,Hash,Eq,PartialEq)]
pub struct GeneralAttributes{
pub booster:Option<Booster>,
pub trajectory:Option<SetTrajectory>,
pub wormhole:Option<Wormhole>,
pub accelerator:Option<Accelerator>,
}
impl GeneralAttributes{
pub const fn any(&self)->bool{
self.booster.is_some()
||self.trajectory.is_some()
||self.wormhole.is_some()
||self.accelerator.is_some()
}
pub fn is_wrcp(&self)->bool{
self.trajectory.as_ref().map_or(false,|t|t.is_absolute())
/*
&&match &self.teleport_behaviour{
Some(TeleportBehaviour::StageElement(
StageElement{
mode_id,
stage_id:_,
force:true,
behaviour:StageElementBehaviour::Trigger|StageElementBehaviour::Teleport
}
))=>current_mode_id==*mode_id,
_=>false,
}
*/
}
}
#[derive(Default,Clone,Hash,Eq,PartialEq)]
pub struct ContactingAttributes{
//friction?
pub contact_behaviour:Option<ContactingBehaviour>,
}
impl ContactingAttributes{
pub const fn any(&self)->bool{
self.contact_behaviour.is_some()
}
}
#[derive(Default,Clone,Hash,Eq,PartialEq)]
pub struct IntersectingAttributes{
pub water:Option<IntersectingWater>,
}
impl IntersectingAttributes{
pub const fn any(&self)->bool{
self.water.is_some()
}
}
#[derive(Clone,Copy,id::Id,Hash,Eq,PartialEq)]
pub struct CollisionAttributesId(u32);
#[derive(Clone,Default,Hash,Eq,PartialEq)]
pub struct ContactAttributes{
pub contacting:ContactingAttributes,
pub general:GeneralAttributes,
}
#[derive(Clone,Default,Hash,Eq,PartialEq)]
pub struct IntersectAttributes{
pub intersecting:IntersectingAttributes,
pub general:GeneralAttributes,
}
#[derive(Clone,Hash,Eq,PartialEq)]
pub enum CollisionAttributes{
Decoration,//visual only
Contact(ContactAttributes),//track whether you are contacting the object
Intersect(IntersectAttributes),//track whether you are intersecting the object
}
impl CollisionAttributes{
pub fn contact_default()->Self{
Self::Contact(ContactAttributes::default())
}
}
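
As a worked example of the attribute hierarchy above (the construction itself is assumed; every type and variant is from this file): a surfable contact surface that also applies an additive velocity boost would be assembled roughly like this.

use strafesnet_common::gameplay_attributes::{
	Booster,CollisionAttributes,ContactAttributes,ContactingAttributes,
	ContactingBehaviour,GeneralAttributes,
};
use strafesnet_common::integer::vec3;

fn surf_booster()->CollisionAttributes{
	CollisionAttributes::Contact(ContactAttributes{
		contacting:ContactingAttributes{
			contact_behaviour:Some(ContactingBehaviour::Surf),
		},
		general:GeneralAttributes{
			//straight additive velocity boost, per the Booster::Velocity comment above
			booster:Some(Booster::Velocity(
				vec3::try_from_f32_array([0.0,0.0,-100.0]).unwrap()
			)),
			..Default::default()
		},
	})
}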


@@ -1,332 +0,0 @@
use std::collections::{HashSet,HashMap};
use crate::model::ModelId;
use crate::gameplay_style;
use crate::updatable::Updatable;
#[derive(Clone)]
pub struct StageElement{
stage_id:StageId,//which stage spawn to send to
force:bool,//allow setting to lower spawn id i.e. 7->3
behaviour:StageElementBehaviour,
jump_limit:Option<u8>,
}
impl StageElement{
#[inline]
pub const fn new(stage_id:StageId,force:bool,behaviour:StageElementBehaviour,jump_limit:Option<u8>)->Self{
Self{
stage_id,
force,
behaviour,
jump_limit,
}
}
#[inline]
pub const fn stage_id(&self)->StageId{
self.stage_id
}
#[inline]
pub const fn force(&self)->bool{
self.force
}
#[inline]
pub const fn behaviour(&self)->StageElementBehaviour{
self.behaviour
}
#[inline]
pub const fn jump_limit(&self)->Option<u8>{
self.jump_limit
}
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
pub enum StageElementBehaviour{
SpawnAt,//must be standing on top to get effect. except cancollide false
Trigger,
Teleport,
Platform,
//Check(point) acts like a trigger if you haven't hit all the checkpoints on previous stages yet.
//Note that all stage elements act like this, this is just the isolated behaviour.
Check,
Checkpoint,//this is a combined behaviour for Ordered & Unordered in case a model is used multiple times or for both.
}
#[derive(Clone,Copy,Debug,Hash,id::Id,Eq,PartialEq)]
pub struct CheckpointId(u32);
impl CheckpointId{
pub const FIRST:Self=Self(0);
}
#[derive(Clone,Copy,Debug,Hash,id::Id,Eq,PartialEq,Ord,PartialOrd)]
pub struct StageId(u32);
impl StageId{
pub const FIRST:Self=Self(0);
}
#[derive(Clone)]
pub struct Stage{
spawn:ModelId,
//open world support lol
ordered_checkpoints_count:u32,
unordered_checkpoints_count:u32,
//currently loaded checkpoint models
ordered_checkpoints:HashMap<CheckpointId,ModelId>,
unordered_checkpoints:HashSet<ModelId>,
}
impl Stage{
pub fn new(
spawn:ModelId,
ordered_checkpoints_count:u32,
unordered_checkpoints_count:u32,
ordered_checkpoints:HashMap<CheckpointId,ModelId>,
unordered_checkpoints:HashSet<ModelId>,
)->Self{
Self{
spawn,
ordered_checkpoints_count,
unordered_checkpoints_count,
ordered_checkpoints,
unordered_checkpoints,
}
}
pub fn empty(spawn:ModelId)->Self{
Self{
spawn,
ordered_checkpoints_count:0,
unordered_checkpoints_count:0,
ordered_checkpoints:HashMap::new(),
unordered_checkpoints:HashSet::new(),
}
}
#[inline]
pub const fn spawn(&self)->ModelId{
self.spawn
}
#[inline]
pub const fn ordered_checkpoints_count(&self)->u32{
self.ordered_checkpoints_count
}
#[inline]
pub const fn unordered_checkpoints_count(&self)->u32{
self.unordered_checkpoints_count
}
pub fn into_inner(self)->(HashMap<CheckpointId,ModelId>,HashSet<ModelId>){
(self.ordered_checkpoints,self.unordered_checkpoints)
}
/// Returns true if the stage has no checkpoints.
#[inline]
pub const fn is_empty(&self)->bool{
self.is_complete(0,0)
}
#[inline]
pub const fn is_complete(&self,ordered_checkpoints_count:u32,unordered_checkpoints_count:u32)->bool{
self.ordered_checkpoints_count==ordered_checkpoints_count&&self.unordered_checkpoints_count==unordered_checkpoints_count
}
#[inline]
pub fn is_next_ordered_checkpoint(&self,next_ordered_checkpoint_id:CheckpointId,model_id:ModelId)->bool{
self.ordered_checkpoints.get(&next_ordered_checkpoint_id).is_some_and(|&next_checkpoint|model_id==next_checkpoint)
}
#[inline]
pub fn is_unordered_checkpoint(&self,model_id:ModelId)->bool{
self.unordered_checkpoints.contains(&model_id)
}
}
#[derive(Default)]
pub struct StageUpdate{
//other behaviour models of this stage can have
ordered_checkpoints:HashMap<CheckpointId,ModelId>,
unordered_checkpoints:HashSet<ModelId>,
}
impl Updatable<StageUpdate> for Stage{
fn update(&mut self,update:StageUpdate){
self.ordered_checkpoints.extend(update.ordered_checkpoints);
self.unordered_checkpoints.extend(update.unordered_checkpoints);
}
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
pub enum Zone{
Start,
Finish,
Anticheat,
}
#[derive(Clone,Copy,Debug,Hash,id::Id,Eq,PartialEq,Ord,PartialOrd)]
pub struct ModeId(u32);
impl ModeId{
pub const MAIN:Self=Self(0);
pub const BONUS:Self=Self(1);
}
#[derive(Clone)]
pub struct Mode{
style:gameplay_style::StyleModifiers,
start:ModelId,//when you press reset you go here
zones:HashMap<ModelId,Zone>,
stages:Vec<Stage>,//when you load the map you go to stages[0].spawn
	//mutually exclusive stage element behaviour
elements:HashMap<ModelId,StageElement>,
}
impl Mode{
pub fn new(
style:gameplay_style::StyleModifiers,
start:ModelId,
zones:HashMap<ModelId,Zone>,
stages:Vec<Stage>,
elements:HashMap<ModelId,StageElement>,
)->Self{
Self{
style,
start,
zones,
stages,
elements,
}
}
pub fn empty(style:gameplay_style::StyleModifiers,start:ModelId)->Self{
Self{
style,
start,
zones:HashMap::new(),
stages:Vec::new(),
elements:HashMap::new(),
}
}
pub fn into_inner(self)->(
gameplay_style::StyleModifiers,
ModelId,
HashMap<ModelId,Zone>,
Vec<Stage>,
HashMap<ModelId,StageElement>,
){
(
self.style,
self.start,
self.zones,
self.stages,
self.elements,
)
}
pub const fn get_start(&self)->ModelId{
self.start
}
pub const fn get_style(&self)->&gameplay_style::StyleModifiers{
&self.style
}
pub fn push_stage(&mut self,stage:Stage){
self.stages.push(stage)
}
pub fn get_stage_mut(&mut self,stage:StageId)->Option<&mut Stage>{
self.stages.get_mut(stage.0 as usize)
}
pub fn get_spawn_model_id(&self,stage:StageId)->Option<ModelId>{
self.stages.get(stage.0 as usize).map(|s|s.spawn)
}
pub fn get_zone(&self,model_id:ModelId)->Option<&Zone>{
self.zones.get(&model_id)
}
pub fn get_stage(&self,stage_id:StageId)->Option<&Stage>{
self.stages.get(stage_id.0 as usize)
}
pub fn get_element(&self,model_id:ModelId)->Option<&StageElement>{
self.elements.get(&model_id)
}
//TODO: put this in the SNF
pub fn denormalize_data(&mut self){
//expand and index normalized data
self.zones.insert(self.start,Zone::Start);
for (stage_id,stage) in self.stages.iter().enumerate(){
self.elements.insert(stage.spawn,StageElement{
stage_id:StageId(stage_id as u32),
force:false,
behaviour:StageElementBehaviour::SpawnAt,
jump_limit:None,
});
for (_,&model) in &stage.ordered_checkpoints{
self.elements.insert(model,StageElement{
stage_id:StageId(stage_id as u32),
force:false,
behaviour:StageElementBehaviour::Checkpoint,
jump_limit:None,
});
}
for &model in &stage.unordered_checkpoints{
self.elements.insert(model,StageElement{
stage_id:StageId(stage_id as u32),
force:false,
behaviour:StageElementBehaviour::Checkpoint,
jump_limit:None,
});
}
}
}
}
//this would be nice as a macro
#[derive(Default)]
pub struct ModeUpdate{
zones:HashMap<ModelId,Zone>,
stages:HashMap<StageId,StageUpdate>,
	//mutually exclusive stage element behaviour
elements:HashMap<ModelId,StageElement>,
}
impl Updatable<ModeUpdate> for Mode{
fn update(&mut self,update:ModeUpdate){
self.zones.extend(update.zones);
for (stage,stage_update) in update.stages{
if let Some(stage)=self.stages.get_mut(stage.0 as usize){
stage.update(stage_update);
}
}
self.elements.extend(update.elements);
}
}
impl ModeUpdate{
pub fn zone(model_id:ModelId,zone:Zone)->Self{
let mut mu=Self::default();
mu.zones.insert(model_id,zone);
mu
}
pub fn stage(stage_id:StageId,stage_update:StageUpdate)->Self{
let mut mu=Self::default();
mu.stages.insert(stage_id,stage_update);
mu
}
pub fn element(model_id:ModelId,element:StageElement)->Self{
let mut mu=Self::default();
mu.elements.insert(model_id,element);
mu
}
pub fn map_stage_element_ids<F:Fn(StageId)->StageId>(&mut self,f:F){
for (_,stage_element) in self.elements.iter_mut(){
stage_element.stage_id=f(stage_element.stage_id);
}
}
}
#[derive(Default,Clone)]
pub struct Modes{
pub modes:Vec<Mode>,
}
impl Modes{
pub const fn new(modes:Vec<Mode>)->Self{
Self{
modes,
}
}
pub fn into_inner(self)->Vec<Mode>{
self.modes
}
pub fn push_mode(&mut self,mode:Mode){
self.modes.push(mode)
}
pub fn get_mode(&self,mode:ModeId)->Option<&Mode>{
self.modes.get(mode.0 as usize)
}
}
pub struct ModesUpdate{
modes:HashMap<ModeId,ModeUpdate>,
}
impl Updatable<ModesUpdate> for Modes{
fn update(&mut self,update:ModesUpdate){
for (mode,mode_update) in update.modes{
if let Some(mode)=self.modes.get_mut(mode.0 as usize){
mode.update(mode_update);
}
}
}
}
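
A minimal usage sketch of the mode types above, written against this (deleted) API rather than taken from it. It assumes the crate is consumed as `strafesnet_common` and that the `id::Id` derive exposes a `ModelId::new` constructor (it may instead require importing the `id::Id` trait); the identifiers and ids are illustrative only.

use strafesnet_common::gameplay_modes::{Mode,Modes,Stage};
use strafesnet_common::gameplay_style::StyleModifiers;
use strafesnet_common::model::ModelId;

fn build_modes()->Modes{
	//an empty main mode whose start zone is model 0 (hypothetical id)
	let mut main_mode=Mode::empty(StyleModifiers::roblox_bhop(),ModelId::new(0));
	//stage 0 spawns at model 1 and has no checkpoints yet
	main_mode.push_stage(Stage::empty(ModelId::new(1)));
	//expand the normalized stage list into the zone/element lookup tables
	main_mode.denormalize_data();
	let mut modes=Modes::default();
	modes.push_mode(main_mode);
	modes
}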

View File

@ -1,612 +0,0 @@
const VALVE_SCALE:Planar64=Planar64::raw(1<<28);// 1/16
use crate::integer::{int,vec3::int as int3,AbsoluteTime,Ratio64,Planar64,Planar64Vec3};
use crate::controls_bitflag::Controls;
use crate::physics::Time as PhysicsTime;
#[derive(Clone,Debug)]
pub struct StyleModifiers{
//controls which are allowed to pass into gameplay (usually all)
pub controls_mask:Controls,
//controls which are masked from control state (e.g. !jump in scroll style)
pub controls_mask_state:Controls,
//strafing
pub strafe:Option<StrafeSettings>,
//player gets a controllable rocket force
pub rocket:Option<PropulsionSettings>,
//flying
//pub move_type:MoveType::Fly(FlySettings)
//MoveType::Physics(PhysicsSettings) -> PhysicsSettings (strafe,rocket,jump,walk,ladder,swim,gravity)
//jumping is allowed
pub jump:Option<JumpSettings>,
//standing & walking is allowed
pub walk:Option<WalkSettings>,
//laddering is allowed
pub ladder:Option<LadderSettings>,
//water propulsion
pub swim:Option<PropulsionSettings>,
	//gravity acceleration (direction and magnitude)
pub gravity:Planar64Vec3,
//hitbox
pub hitbox:Hitbox,
//camera location relative to the center (0,0,0) of the hitbox
pub camera_offset:Planar64Vec3,
//unused
pub mass:Planar64,
}
impl std::default::Default for StyleModifiers{
fn default()->Self{
Self::roblox_bhop()
}
}
#[derive(Clone,Debug)]
pub enum JumpCalculation{
Max,//Roblox: jumped_speed=max(velocity.boost(),velocity.jump())
BoostThenJump,//jumped_speed=velocity.boost().jump()
JumpThenBoost,//jumped_speed=velocity.jump().boost()
}
#[derive(Clone,Debug)]
pub enum JumpImpulse{
Time(AbsoluteTime),//jump time is invariant across mass and gravity changes
Height(Planar64),//jump height is invariant across mass and gravity changes
Linear(Planar64),//jump velocity is invariant across mass and gravity changes
Energy(Planar64),// :)
}
//Jumping acts on dot(walks_state.normal,body.velocity)
//Energy means it adds energy
//Linear means it linearly adds on
impl JumpImpulse{
pub fn jump(
&self,
velocity:Planar64Vec3,
jump_dir:Planar64Vec3,
gravity:&Planar64Vec3,
mass:Planar64,
)->Planar64Vec3{
match self{
&JumpImpulse::Time(time)=>velocity-(*gravity*time).map(|t|t.divide().fix_1()),
&JumpImpulse::Height(height)=>{
//height==-v.y*v.y/(2*g.y);
//use energy to determine max height
let gg=gravity.length_squared();
let g=gg.sqrt().fix_1();
let v_g=gravity.dot(velocity);
//do it backwards
let radicand=v_g*v_g+(g*height*2).fix_4();
velocity-(*gravity*(radicand.sqrt().fix_2()+v_g)/gg).divide().fix_1()
},
&JumpImpulse::Linear(jump_speed)=>velocity+(jump_dir*jump_speed/jump_dir.length()).divide().fix_1(),
&JumpImpulse::Energy(energy)=>{
//calculate energy
//let e=gravity.dot(velocity);
//add
//you get the idea
todo!()
},
}
}
//TODO: remove this and implement JumpCalculation properly
pub fn get_jump_deltav(&self,gravity:&Planar64Vec3,mass:Planar64)->Planar64{
//gravity.length() is actually the proper calculation because the jump is always opposite the gravity direction
match self{
&JumpImpulse::Time(time)=>(gravity.length().fix_1()*time/2).divide().fix_1(),
&JumpImpulse::Height(height)=>(gravity.length()*height*2).sqrt().fix_1(),
&JumpImpulse::Linear(deltav)=>deltav,
&JumpImpulse::Energy(energy)=>(energy.sqrt()*2/mass.sqrt()).divide().fix_1(),
}
}
}
#[derive(Clone,Debug)]
pub struct JumpSettings{
//information used to calculate jump power
pub impulse:JumpImpulse,
//information used to calculate jump behaviour
pub calculation:JumpCalculation,
//limit the minimum jump power when combined with downwards momentum
//This is true in both roblox and source
pub limit_minimum:bool,
}
impl JumpSettings{
pub fn jumped_velocity(
&self,
style:&StyleModifiers,
jump_dir:Planar64Vec3,
rel_velocity:Planar64Vec3,
booster:Option<&crate::gameplay_attributes::Booster>,
)->Planar64Vec3{
let jump_speed=self.impulse.get_jump_deltav(&style.gravity,style.mass);
match (self.limit_minimum,&self.calculation){
(true,JumpCalculation::Max)=>{
//the roblox calculation
let boost_vel=match booster{
Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity,
};
let j=boost_vel.dot(jump_dir);
let js=jump_speed.fix_2();
if j<js{
//weak booster: just do a regular jump
boost_vel+jump_dir.with_length(js-j).divide().fix_1()
}else{
//activate booster normally, jump does nothing
boost_vel
}
},
(true,_)=>{
//the source calculation (?)
let boost_vel=match booster{
Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity,
};
let j=boost_vel.dot(jump_dir);
let js=jump_speed.fix_2();
if j<js{
//speed in direction of jump cannot be lower than amount
boost_vel+jump_dir.with_length(js-j).divide().fix_1()
}else{
//boost and jump add together
boost_vel+jump_dir.with_length(js).divide().fix_1()
}
}
(false,JumpCalculation::Max)=>{
//??? calculation
//max(boost_vel,jump_vel)
let boost_vel=match booster{
Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity,
};
let boost_dot=boost_vel.dot(jump_dir);
let js=jump_speed.fix_2();
if boost_dot<js{
//weak boost is extended to jump speed
boost_vel+jump_dir.with_length(js-boost_dot).divide().fix_1()
}else{
//activate booster normally, jump does nothing
boost_vel
}
},
//the strafe client calculation
(false,_)=>{
let boost_vel=match booster{
Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity,
};
boost_vel+jump_dir.with_length(jump_speed).divide().fix_1()
},
}
}
}
#[derive(Clone,Debug)]
pub struct ControlsActivation{
//allowed keys
pub controls_mask:Controls,
//allow strafing only if any of the masked controls are held, eg W|S for shsw
pub controls_intersects:Controls,
//allow strafing only if all of the masked controls are held, eg W for hsw, w-only
pub controls_contains:Controls,
//Function(Box<dyn Fn(u32)->bool>),
}
impl ControlsActivation{
pub const fn mask(&self,controls:Controls)->Controls{
controls.intersection(self.controls_mask)
}
pub const fn activates(&self,controls:Controls)->bool{
(self.controls_intersects.is_empty()||controls.intersects(self.controls_intersects))
&&controls.contains(self.controls_contains)
}
pub const fn full_3d()->Self{
Self{
controls_mask:Controls::WASDQE,
controls_intersects:Controls::WASDQE,
controls_contains:Controls::empty(),
}
}
//classical styles
//Normal
pub const fn full_2d()->Self{
Self{
controls_mask:Controls::WASD,
controls_intersects:Controls::WASD,
controls_contains:Controls::empty(),
}
}
//Sideways
pub const fn sideways()->Self{
Self{
controls_mask:Controls::MoveForward.union(Controls::MoveBackward),
controls_intersects:Controls::MoveForward.union(Controls::MoveBackward),
controls_contains:Controls::empty(),
}
}
//Half-Sideways
pub const fn half_sideways()->Self{
Self{
controls_mask:Controls::MoveForward.union(Controls::MoveLeft).union(Controls::MoveRight),
controls_intersects:Controls::MoveLeft.union(Controls::MoveRight),
controls_contains:Controls::MoveForward,
}
}
//Surf Half-Sideways
pub const fn surf_half_sideways()->Self{
Self{
controls_mask:Controls::MoveForward.union(Controls::MoveBackward).union(Controls::MoveLeft).union(Controls::MoveRight),
controls_intersects:Controls::MoveForward.union(Controls::MoveBackward),
controls_contains:Controls::empty(),
}
}
//W-Only
pub const fn w_only()->Self{
Self{
controls_mask:Controls::MoveForward,
controls_intersects:Controls::empty(),
controls_contains:Controls::MoveForward,
}
}
//A-Only
pub const fn a_only()->Self{
Self{
controls_mask:Controls::MoveLeft,
controls_intersects:Controls::empty(),
controls_contains:Controls::MoveLeft,
}
}
//Backwards
}
#[derive(Clone,Debug)]
pub struct StrafeSettings{
pub enable:ControlsActivation,
pub mv:Planar64,
pub air_accel_limit:Option<Planar64>,
pub tick_rate:Ratio64,
}
impl StrafeSettings{
pub fn tick_velocity(&self,velocity:Planar64Vec3,control_dir:Planar64Vec3)->Option<Planar64Vec3>{
let d=velocity.dot(control_dir);
let mv=self.mv.fix_2();
match d<mv{
true=>Some(velocity+(control_dir*self.air_accel_limit.map_or(mv-d,|limit|limit.fix_2().min(mv-d))).fix_1()),
false=>None,
}
}
pub fn next_tick(&self,time:PhysicsTime)->PhysicsTime{
PhysicsTime::from_nanos(self.tick_rate.rhs_div_int(self.tick_rate.mul_int(time.nanos())+1))
}
pub const fn activates(&self,controls:Controls)->bool{
self.enable.activates(controls)
}
pub const fn mask(&self,controls:Controls)->Controls{
self.enable.mask(controls)
}
}
#[derive(Clone,Debug)]
pub struct PropulsionSettings{
pub magnitude:Planar64,
}
impl PropulsionSettings{
pub fn acceleration(&self,control_dir:Planar64Vec3)->Planar64Vec3{
(control_dir*self.magnitude).fix_1()
}
}
#[derive(Clone,Debug)]
pub struct AccelerateSettings{
pub accel:Planar64,
pub topspeed:Planar64,
}
#[derive(Clone,Debug)]
pub struct WalkSettings{
pub accelerate:AccelerateSettings,
pub static_friction:Planar64,
pub kinetic_friction:Planar64,
//if a surf slope angle does not exist, then everything is slippery and walking is impossible
pub surf_dot:Planar64,//surf_dot<n.dot(up)/n.length()
}
impl WalkSettings{
pub fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{
//TODO: fallible walk accel
let diff_len=target_diff.length().fix_1();
let friction=if diff_len<self.accelerate.topspeed{
self.static_friction
}else{
self.kinetic_friction
};
self.accelerate.accel.min((-gravity.y*friction).fix_1())
}
pub fn get_walk_target_velocity(&self,control_dir:Planar64Vec3,normal:Planar64Vec3)->Planar64Vec3{
if control_dir==crate::integer::vec3::ZERO{
return control_dir;
}
let nn=normal.length_squared();
let mm=control_dir.length_squared();
let nnmm=nn*mm;
let d=normal.dot(control_dir);
let dd=d*d;
if dd<nnmm{
let cr=normal.cross(control_dir);
if cr==crate::integer::vec3::ZERO_2{
crate::integer::vec3::ZERO
}else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().fix_1()
}
}else{
crate::integer::vec3::ZERO
}
}
pub fn is_slope_walkable(&self,normal:Planar64Vec3,up:Planar64Vec3)->bool{
//normal is not guaranteed to be unit length
let ny=normal.dot(up);
let h=normal.length().fix_1();
//remember this is a normal vector
ny.is_positive()&&h*self.surf_dot<ny
}
}
#[derive(Clone,Debug)]
pub struct LadderSettings{
pub accelerate:AccelerateSettings,
//how close to pushing directly into/out of the ladder normal
//does your input need to be to redirect straight up/down the ladder
pub dot:Planar64,
}
impl LadderSettings{
pub const fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{
//TODO: fallible ladder accel
self.accelerate.accel
}
pub fn get_ladder_target_velocity(&self,mut control_dir:Planar64Vec3,normal:Planar64Vec3)->Planar64Vec3{
if control_dir==crate::integer::vec3::ZERO{
return control_dir;
}
let nn=normal.length_squared();
let mm=control_dir.length_squared();
let nnmm=nn*mm;
let d=normal.dot(control_dir);
let mut dd=d*d;
if (self.dot*self.dot*nnmm).fix_4()<dd{
if d.is_negative(){
control_dir=Planar64Vec3::new([Planar64::ZERO,mm.fix_1(),Planar64::ZERO]);
}else{
control_dir=Planar64Vec3::new([Planar64::ZERO,-mm.fix_1(),Planar64::ZERO]);
}
dd=(normal.y*normal.y).fix_4();
}
//n=d if you are standing on top of a ladder and press E.
//two fixes:
//- ladder movement is not allowed on walkable surfaces
//- fix the underlying issue
if dd<nnmm{
let cr=normal.cross(control_dir);
if cr==crate::integer::vec3::ZERO_2{
crate::integer::vec3::ZERO
}else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().fix_1()
}
}else{
crate::integer::vec3::ZERO
}
}
}
#[derive(Clone,Debug)]
pub enum HitboxMesh{
Box,//source
Cylinder,//roblox
//Sphere,//roblox old physics
//Point,
//Line,
//DualCone,
}
#[derive(Clone,Debug)]
pub struct Hitbox{
pub halfsize:Planar64Vec3,
pub mesh:HitboxMesh,
}
impl Hitbox{
pub fn roblox()->Self{
Self{
halfsize:int3(2,5,2)>>1,
mesh:HitboxMesh::Cylinder,
}
}
pub fn source()->Self{
Self{
halfsize:((int3(33,73,33)>>1)*VALVE_SCALE).fix_1(),
mesh:HitboxMesh::Box,
}
}
}
impl StyleModifiers{
pub const RIGHT_DIR:Planar64Vec3=crate::integer::vec3::X;
pub const UP_DIR:Planar64Vec3=crate::integer::vec3::Y;
pub const FORWARD_DIR:Planar64Vec3=crate::integer::vec3::NEG_Z;
pub fn neo()->Self{
Self{
controls_mask:Controls::all(),
controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(),
air_accel_limit:None,
mv:int(3),
tick_rate:Ratio64::new(64,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Energy(int(512)),
calculation:JumpCalculation::JumpThenBoost,
limit_minimum:false,
}),
gravity:int3(0,-80,0),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
accelerate:AccelerateSettings{
topspeed:int(16),
accel:int(80),
},
static_friction:int(2),
kinetic_friction:int(3),//unrealistic: kinetic friction is typically lower than static
surf_dot:int(3)/4,
}),
ladder:Some(LadderSettings{
accelerate:AccelerateSettings{
topspeed:int(16),
accel:int(160),
},
dot:(int(1)/2).sqrt(),
}),
swim:Some(PropulsionSettings{
magnitude:int(12),
}),
hitbox:Hitbox::roblox(),
camera_offset:int3(0,2,0),//4.5-2.5=2
}
}
pub fn roblox_bhop()->Self{
Self{
controls_mask:Controls::all(),
controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(),
air_accel_limit:None,
mv:int(27)/10,
tick_rate:Ratio64::new(100,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Time(AbsoluteTime::from_micros(715_588)),
calculation:JumpCalculation::Max,
limit_minimum:true,
}),
gravity:int3(0,-100,0),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
accelerate:AccelerateSettings{
topspeed:int(18),
accel:int(90),
},
static_friction:int(2),
kinetic_friction:int(3),//unrealistic: kinetic friction is typically lower than static
surf_dot:int(3)/4,// normal.y=0.75
}),
ladder:Some(LadderSettings{
accelerate:AccelerateSettings{
topspeed:int(18),
accel:int(180),
},
dot:(int(1)/2).sqrt(),
}),
swim:Some(PropulsionSettings{
magnitude:int(12),
}),
hitbox:Hitbox::roblox(),
camera_offset:int3(0,2,0),//4.5-2.5=2
}
}
pub fn roblox_surf()->Self{
Self{
gravity:int3(0,-50,0),
..Self::roblox_bhop()
}
}
pub fn roblox_rocket()->Self{
Self{
strafe:None,
rocket:Some(PropulsionSettings{
magnitude:int(200),
}),
..Self::roblox_bhop()
}
}
pub fn source_bhop()->Self{
Self{
controls_mask:Controls::all()-Controls::MoveUp-Controls::MoveDown,
controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(),
air_accel_limit:Some(Planar64::raw(150<<28)*100),
mv:(Planar64::raw(30)*VALVE_SCALE).fix_1(),
tick_rate:Ratio64::new(100,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).fix_1()),
calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true,
}),
gravity:(int3(0,-800,0)*VALVE_SCALE).fix_1(),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
accelerate:AccelerateSettings{
topspeed:int(18),//?
accel:int(90),//?
},
static_friction:int(2),//?
kinetic_friction:int(3),//?
surf_dot:int(3)/4,// normal.y=0.75
}),
ladder:Some(LadderSettings{
accelerate:AccelerateSettings{
topspeed:int(18),//?
accel:int(180),//?
},
dot:(int(1)/2).sqrt(),//?
}),
swim:Some(PropulsionSettings{
magnitude:int(12),//?
}),
hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).fix_1(),
}
}
pub fn source_surf()->Self{
Self{
controls_mask:Controls::all()-Controls::MoveUp-Controls::MoveDown,
controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(),
air_accel_limit:Some((int(150)*66*VALVE_SCALE).fix_1()),
mv:(int(30)*VALVE_SCALE).fix_1(),
tick_rate:Ratio64::new(66,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}),
jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).fix_1()),
calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true,
}),
gravity:(int3(0,-800,0)*VALVE_SCALE).fix_1(),
mass:int(1),
rocket:None,
walk:Some(WalkSettings{
accelerate:AccelerateSettings{
topspeed:int(18),//?
accel:int(90),//?
},
static_friction:int(2),//?
kinetic_friction:int(3),//?
surf_dot:int(3)/4,// normal.y=0.75
}),
ladder:Some(LadderSettings{
accelerate:AccelerateSettings{
topspeed:int(18),//?
accel:int(180),//?
},
dot:(int(1)/2).sqrt(),//?
}),
swim:Some(PropulsionSettings{
magnitude:int(12),//?
}),
hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).fix_1(),
}
}
}
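
A worked example (not from the original file) of the JumpImpulse relationships above, done in plain f64 to sidestep the fixed-point plumbing. The constants are the ones used by StyleModifiers::roblox_bhop(): |g| = 100 and a Time impulse of 715588 microseconds.

fn jump_impulse_example(){
	let g=100.0_f64;//studs/s^2, matches gravity:int3(0,-100,0)
	let t=0.715_588_f64;//seconds, matches JumpImpulse::Time(715_588 microseconds)
	//JumpImpulse::Time: total airtime is 2*v/|g|, so v = |g|*t/2
	let v_time=g*t/2.0;//~35.78 studs/s
	//JumpImpulse::Height: v^2 = 2*|g|*h, the same formula get_jump_deltav uses
	let h=v_time*v_time/(2.0*g);//~6.40 studs at the apex
	let v_height=(2.0*g*h).sqrt();//recovers the same jump speed
	assert!((v_time-v_height).abs()<1e-9);
	println!("jump speed {v_time:.2} studs/s, apex height {h:.2} studs");
}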

View File

@ -1,75 +0,0 @@
use crate::integer::Time;
#[derive(Debug)]
pub struct TimedInstruction<I,T>{
pub time:Time<T>,
pub instruction:I,
}
/// Ensure all emitted instructions are processed before consuming external instructions
pub trait InstructionEmitter<I>{
type TimeInner;
fn next_instruction(&self,time_limit:Time<Self::TimeInner>)->Option<TimedInstruction<I,Self::TimeInner>>;
}
/// Apply an atomic state update
pub trait InstructionConsumer<I>{
type TimeInner;
fn process_instruction(&mut self,instruction:TimedInstruction<I,Self::TimeInner>);
}
/// If the object produces its own instructions, allow exhaustively feeding them back in
pub trait InstructionFeedback<I,T>:InstructionEmitter<I,TimeInner=T>+InstructionConsumer<I,TimeInner=T>
where
Time<T>:Copy,
{
fn process_exhaustive(&mut self,time_limit:Time<T>){
while let Some(instruction)=self.next_instruction(time_limit){
self.process_instruction(instruction);
}
}
}
impl<I,T,X> InstructionFeedback<I,T> for X
where
Time<T>:Copy,
X:InstructionEmitter<I,TimeInner=T>+InstructionConsumer<I,TimeInner=T>,
{}
//PROPER PRIVATE FIELDS!!!
pub struct InstructionCollector<I,T>{
time:Time<T>,
instruction:Option<I>,
}
impl<I,T> InstructionCollector<I,T>
where Time<T>:Copy+PartialOrd,
{
pub const fn new(time:Time<T>)->Self{
Self{
time,
instruction:None
}
}
#[inline]
pub const fn time(&self)->Time<T>{
self.time
}
pub fn collect(&mut self,instruction:Option<TimedInstruction<I,T>>){
match instruction{
Some(unwrap_instruction)=>{
if unwrap_instruction.time<self.time {
self.time=unwrap_instruction.time;
self.instruction=Some(unwrap_instruction.instruction);
}
},
None=>(),
}
}
pub fn instruction(self)->Option<TimedInstruction<I,T>>{
//STEAL INSTRUCTION AND DESTROY INSTRUCTIONCOLLECTOR
match self.instruction{
Some(instruction)=>Some(TimedInstruction{
time:self.time,
instruction
}),
None=>None,
}
}
}
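
A small usage sketch (not from the original file) for InstructionCollector: it keeps whichever candidate arrives with the earliest time below the initial limit. The `Tick` enum and the choice of `session::TimeInner` as the time marker are stand-ins, and the crate is assumed to be consumed as `strafesnet_common`.

use strafesnet_common::instruction::{InstructionCollector,TimedInstruction};
use strafesnet_common::integer::Time;
use strafesnet_common::session::TimeInner;

enum Tick{Physics,Render}

fn earliest_tick()->Option<TimedInstruction<Tick,TimeInner>>{
	//nothing at or after the 10 second limit will be collected
	let mut collector=InstructionCollector::new(Time::from_secs(10));
	collector.collect(Some(TimedInstruction{time:Time::from_millis(16),instruction:Tick::Render}));
	collector.collect(Some(TimedInstruction{time:Time::from_millis(8),instruction:Tick::Physics}));
	collector.collect(None);
	//yields the physics tick at 8ms, the earliest candidate seen
	collector.instruction()
}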

View File

@ -1,679 +0,0 @@
pub use fixed_wide::fixed::{Fixed,Fix};
pub use ratio_ops::ratio::{Ratio,Divide};
//integer units
/// specific example of a "default" time type
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{}
pub type AbsoluteTime=Time<TimeInner>;
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub struct Time<T>(i64,core::marker::PhantomData<T>);
impl<T> Time<T>{
pub const MIN:Self=Self::raw(i64::MIN);
pub const MAX:Self=Self::raw(i64::MAX);
pub const ZERO:Self=Self::raw(0);
pub const ONE_SECOND:Self=Self::raw(1_000_000_000);
pub const ONE_MILLISECOND:Self=Self::raw(1_000_000);
pub const ONE_MICROSECOND:Self=Self::raw(1_000);
pub const ONE_NANOSECOND:Self=Self::raw(1);
#[inline]
pub const fn raw(num:i64)->Self{
Self(num,core::marker::PhantomData)
}
#[inline]
pub const fn get(self)->i64{
self.0
}
#[inline]
pub const fn from_secs(num:i64)->Self{
Self::raw(Self::ONE_SECOND.0*num)
}
#[inline]
pub const fn from_millis(num:i64)->Self{
Self::raw(Self::ONE_MILLISECOND.0*num)
}
#[inline]
pub const fn from_micros(num:i64)->Self{
Self::raw(Self::ONE_MICROSECOND.0*num)
}
#[inline]
pub const fn from_nanos(num:i64)->Self{
Self::raw(Self::ONE_NANOSECOND.0*num)
}
//should I have checked subtraction? force all time variables to be positive?
#[inline]
pub const fn nanos(self)->i64{
self.0
}
#[inline]
pub const fn to_ratio(self)->Ratio<Planar64,Planar64>{
Ratio::new(Planar64::raw(self.0),Planar64::raw(1_000_000_000))
}
#[inline]
pub const fn coerce<U>(self)->Time<U>{
Time::raw(self.0)
}
}
impl<T> From<Planar64> for Time<T>{
#[inline]
fn from(value:Planar64)->Self{
Self::raw((value*Planar64::raw(1_000_000_000)).fix_1().to_raw())
}
}
impl<T,Num,Den,N1,T1> From<Ratio<Num,Den>> for Time<T>
where
Num:core::ops::Mul<Planar64,Output=N1>,
N1:Divide<Den,Output=T1>,
T1:Fix<Planar64>,
{
#[inline]
fn from(value:Ratio<Num,Den>)->Self{
Self::raw((value*Planar64::raw(1_000_000_000)).divide().fix().to_raw())
}
}
impl<T> std::fmt::Display for Time<T>{
#[inline]
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{}s+{:09}ns",self.0/Self::ONE_SECOND.0,self.0%Self::ONE_SECOND.0)
}
}
impl<T> std::default::Default for Time<T>{
fn default()->Self{
Self::raw(0)
}
}
impl<T> std::ops::Neg for Time<T>{
type Output=Self;
#[inline]
fn neg(self)->Self::Output {
Self::raw(-self.0)
}
}
macro_rules! impl_time_additive_operator {
($trait:ty, $method:ident) => {
impl<T> $trait for Time<T>{
type Output=Self;
#[inline]
fn $method(self,rhs:Self)->Self::Output {
Self::raw(self.0.$method(rhs.0))
}
}
};
}
impl_time_additive_operator!(core::ops::Add,add);
impl_time_additive_operator!(core::ops::Sub,sub);
impl_time_additive_operator!(core::ops::Rem,rem);
macro_rules! impl_time_additive_assign_operator {
($trait:ty, $method:ident) => {
impl<T> $trait for Time<T>{
#[inline]
fn $method(&mut self,rhs:Self){
self.0.$method(rhs.0)
}
}
};
}
impl_time_additive_assign_operator!(core::ops::AddAssign,add_assign);
impl_time_additive_assign_operator!(core::ops::SubAssign,sub_assign);
impl_time_additive_assign_operator!(core::ops::RemAssign,rem_assign);
impl<T> std::ops::Mul for Time<T>{
type Output=Ratio<fixed_wide::fixed::Fixed<2,64>,fixed_wide::fixed::Fixed<2,64>>;
#[inline]
fn mul(self,rhs:Self)->Self::Output{
Ratio::new(Fixed::raw(self.0)*Fixed::raw(rhs.0),Fixed::raw_digit(1_000_000_000i64.pow(2)))
}
}
impl<T> std::ops::Div<i64> for Time<T>{
type Output=Self;
#[inline]
fn div(self,rhs:i64)->Self::Output{
Self::raw(self.0/rhs)
}
}
impl<T> std::ops::Mul<i64> for Time<T>{
type Output=Self;
#[inline]
fn mul(self,rhs:i64)->Self::Output{
Self::raw(self.0*rhs)
}
}
impl<T> core::ops::Mul<Time<T>> for Planar64{
type Output=Ratio<Fixed<2,64>,Planar64>;
fn mul(self,rhs:Time<T>)->Self::Output{
Ratio::new(self*Fixed::raw(rhs.0),Planar64::raw(1_000_000_000))
}
}
#[cfg(test)]
mod test_time{
use super::*;
type Time=super::AbsoluteTime;
#[test]
fn time_from_planar64(){
let a:Time=Planar64::from(1).into();
assert_eq!(a,Time::ONE_SECOND);
}
#[test]
fn time_from_ratio(){
let a:Time=Ratio::new(Planar64::from(1),Planar64::from(1)).into();
assert_eq!(a,Time::ONE_SECOND);
}
#[test]
fn time_squared(){
let a=Time::from_secs(2);
assert_eq!(a*a,Ratio::new(Fixed::<2,64>::raw_digit(1_000_000_000i64.pow(2))*4,Fixed::<2,64>::raw_digit(1_000_000_000i64.pow(2))));
}
#[test]
fn time_times_planar64(){
let a=Time::from_secs(2);
let b=Planar64::from(2);
assert_eq!(b*a,Ratio::new(Fixed::<2,64>::raw_digit(1_000_000_000*(1<<32))<<2,Fixed::<1,32>::raw_digit(1_000_000_000)));
}
}
#[inline]
const fn gcd(mut a:u64,mut b:u64)->u64{
while b!=0{
(a,b)=(b,a.rem_euclid(b));
};
a
}
#[derive(Clone,Copy,Debug,Hash)]
pub struct Ratio64{
num:i64,
den:u64,
}
impl Ratio64{
pub const ZERO:Self=Ratio64{num:0,den:1};
pub const ONE:Self=Ratio64{num:1,den:1};
#[inline]
pub const fn new(num:i64,den:u64)->Option<Ratio64>{
if den==0{
None
}else{
let d=gcd(num.unsigned_abs(),den);
Some(Self{num:num/(d as i64),den:den/d})
}
}
#[inline]
pub const fn num(self)->i64{
self.num
}
#[inline]
pub const fn den(self)->u64{
self.den
}
#[inline]
pub const fn mul_int(&self,rhs:i64)->i64{
rhs*self.num/(self.den as i64)
}
#[inline]
pub const fn rhs_div_int(&self,rhs:i64)->i64{
rhs*(self.den as i64)/self.num
}
#[inline]
pub const fn mul_ref(&self,rhs:&Ratio64)->Ratio64{
let (num,den)=(self.num*rhs.num,self.den*rhs.den);
let d=gcd(num.unsigned_abs(),den);
Self{
num:num/(d as i64),
den:den/d,
}
}
}
//from num_traits crate
#[inline]
fn integer_decode_f32(f: f32) -> (u64, i16, i8) {
let bits: u32 = f.to_bits();
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
#[inline]
fn integer_decode_f64(f: f64) -> (u64, i16, i8) {
let bits: u64 = f.to_bits();
let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
let mantissa = if exponent == 0 {
(bits & 0xfffffffffffff) << 1
} else {
(bits & 0xfffffffffffff) | 0x10000000000000
};
// Exponent bias + mantissa shift
exponent -= 1023 + 52;
(mantissa, exponent, sign)
}
#[derive(Debug)]
pub enum Ratio64TryFromFloatError{
Nan,
Infinite,
Subnormal,
HighlyNegativeExponent(i16),
HighlyPositiveExponent(i16),
}
const MAX_DENOMINATOR:u128=u64::MAX as u128;
#[inline]
fn ratio64_from_mes((m,e,s):(u64,i16,i8))->Result<Ratio64,Ratio64TryFromFloatError>{
if e< -127{
//this can also just be zero
Err(Ratio64TryFromFloatError::HighlyNegativeExponent(e))
}else if e< -63{
//approximate input ratio within denominator limit
let mut target_num=m as u128;
let mut target_den=1u128<<-e;
let mut num=1;
let mut den=0;
let mut prev_num=0;
let mut prev_den=1;
while target_den!=0{
let whole=target_num/target_den;
(target_num,target_den)=(target_den,target_num-whole*target_den);
let new_num=whole*num+prev_num;
let new_den=whole*den+prev_den;
if MAX_DENOMINATOR<new_den{
break;
}else{
(prev_num,prev_den)=(num,den);
(num,den)=(new_num,new_den);
}
}
Ok(Ratio64::new(num as i64,den as u64).unwrap())
}else if e<0{
Ok(Ratio64::new((m as i64)*(s as i64),1<<-e).unwrap())
}else if (64-m.leading_zeros() as i16)+e<64{
Ok(Ratio64::new((m as i64)*(s as i64)*(1<<e),1).unwrap())
}else{
Err(Ratio64TryFromFloatError::HighlyPositiveExponent(e))
}
}
impl TryFrom<f32> for Ratio64{
type Error=Ratio64TryFromFloatError;
#[inline]
fn try_from(value:f32)->Result<Self,Self::Error>{
match value.classify(){
std::num::FpCategory::Nan=>Err(Self::Error::Nan),
std::num::FpCategory::Infinite=>Err(Self::Error::Infinite),
std::num::FpCategory::Zero=>Ok(Self::ZERO),
std::num::FpCategory::Subnormal
|std::num::FpCategory::Normal=>ratio64_from_mes(integer_decode_f32(value)),
}
}
}
impl TryFrom<f64> for Ratio64{
type Error=Ratio64TryFromFloatError;
#[inline]
fn try_from(value:f64)->Result<Self,Self::Error>{
match value.classify(){
std::num::FpCategory::Nan=>Err(Self::Error::Nan),
std::num::FpCategory::Infinite=>Err(Self::Error::Infinite),
std::num::FpCategory::Zero=>Ok(Self::ZERO),
std::num::FpCategory::Subnormal
|std::num::FpCategory::Normal=>ratio64_from_mes(integer_decode_f64(value)),
}
}
}
impl std::ops::Mul<Ratio64> for Ratio64{
type Output=Ratio64;
#[inline]
fn mul(self,rhs:Ratio64)->Self::Output{
let (num,den)=(self.num*rhs.num,self.den*rhs.den);
let d=gcd(num.unsigned_abs(),den);
Self{
num:num/(d as i64),
den:den/d,
}
}
}
impl std::ops::Mul<i64> for Ratio64{
type Output=Ratio64;
#[inline]
fn mul(self,rhs:i64)->Self::Output {
Self{
num:self.num*rhs,
den:self.den,
}
}
}
impl std::ops::Div<u64> for Ratio64{
type Output=Ratio64;
#[inline]
fn div(self,rhs:u64)->Self::Output {
Self{
num:self.num,
den:self.den*rhs,
}
}
}
#[derive(Clone,Copy,Debug,Hash)]
pub struct Ratio64Vec2{
pub x:Ratio64,
pub y:Ratio64,
}
impl Ratio64Vec2{
pub const ONE:Self=Self{x:Ratio64::ONE,y:Ratio64::ONE};
#[inline]
pub const fn new(x:Ratio64,y:Ratio64)->Self{
Self{x,y}
}
#[inline]
pub const fn mul_int(&self,rhs:glam::I64Vec2)->glam::I64Vec2{
glam::i64vec2(
self.x.mul_int(rhs.x),
self.y.mul_int(rhs.y),
)
}
}
impl std::ops::Mul<i64> for Ratio64Vec2{
type Output=Ratio64Vec2;
#[inline]
fn mul(self,rhs:i64)->Self::Output {
Self{
x:self.x*rhs,
y:self.y*rhs,
}
}
}
///[-pi,pi) = [-2^31,2^31-1]
#[derive(Clone,Copy,Hash)]
pub struct Angle32(i32);
impl Angle32{
const ANGLE32_TO_FLOAT64_RADIANS:f64=std::f64::consts::PI/((1i64<<31) as f64);
pub const FRAC_PI_2:Self=Self(1<<30);
pub const NEG_FRAC_PI_2:Self=Self(-1<<30);
pub const PI:Self=Self(-1<<31);
#[inline]
pub const fn wrap_from_i64(theta:i64)->Self{
//take lower bits
//note: this was checked on compiler explorer and compiles to 1 instruction!
Self(i32::from_ne_bytes(((theta&((1<<32)-1)) as u32).to_ne_bytes()))
}
#[inline]
pub fn clamp_from_i64(theta:i64)->Self{
//the assembly is a bit confusing for this, I thought it was checking the same thing twice
//but it's just checking and then overwriting the value for both upper and lower bounds.
Self(theta.clamp(i32::MIN as i64,i32::MAX as i64) as i32)
}
#[inline]
pub const fn get(&self)->i32{
self.0
}
/// Clamps the value towards the midpoint of the range.
/// Note that theta_min can be larger than theta_max and it will wrap clamp the other way around
#[inline]
pub fn clamp(&self,theta_min:Self,theta_max:Self)->Self{
//((max-min as u32)/2 as i32)+min
let midpoint=((
(theta_max.0 as u32)
.wrapping_sub(theta_min.0 as u32)
/2
) as i32)//(u32::MAX/2) as i32 ALWAYS works
.wrapping_add(theta_min.0);
//(theta-mid).clamp(max-mid,min-mid)+mid
Self(
self.0.wrapping_sub(midpoint)
.max(theta_min.0.wrapping_sub(midpoint))
.min(theta_max.0.wrapping_sub(midpoint))
.wrapping_add(midpoint)
)
}
#[inline]
pub fn cos_sin(&self)->(Planar64,Planar64){
/*
//cordic
let a=self.0 as u32;
//initialize based on the quadrant
let (mut x,mut y)=match (a&(1<<31)!=0,a&(1<<30)!=0){
(false,false)=>( 1i64<<32, 0i64 ),//TR
(false,true )=>( 0i64 , 1i64<<32),//TL
(true ,false)=>(-1i64<<32, 0i64 ),//BL
(true ,true )=>( 0i64 ,-1i64<<32),//BR
};
println!("x={} y={}",Planar64::raw(x),Planar64::raw(y));
for i in 0..30{
if a&(1<<(29-i))!=0{
(x,y)=(x-(y>>i),y+(x>>i));
}
println!("i={i} t={} x={} y={}",(a&(1<<(29-i))!=0) as u8,Planar64::raw(x),Planar64::raw(y));
}
//don't forget the gain
(Planar64::raw(x),Planar64::raw(y))
*/
let (s,c)=(self.0 as f64*Self::ANGLE32_TO_FLOAT64_RADIANS).sin_cos();
(Planar64::raw((c*((1u64<<32) as f64)) as i64),Planar64::raw((s*((1u64<<32) as f64)) as i64))
}
}
impl Into<f32> for Angle32{
#[inline]
fn into(self)->f32{
(self.0 as f64*Self::ANGLE32_TO_FLOAT64_RADIANS) as f32
}
}
impl std::ops::Neg for Angle32{
type Output=Angle32;
#[inline]
fn neg(self)->Self::Output{
Angle32(self.0.wrapping_neg())
}
}
impl std::ops::Add<Angle32> for Angle32{
type Output=Angle32;
#[inline]
fn add(self,rhs:Self)->Self::Output {
Angle32(self.0.wrapping_add(rhs.0))
}
}
impl std::ops::Sub<Angle32> for Angle32{
type Output=Angle32;
#[inline]
fn sub(self,rhs:Self)->Self::Output {
Angle32(self.0.wrapping_sub(rhs.0))
}
}
impl std::ops::Mul<i32> for Angle32{
type Output=Angle32;
#[inline]
fn mul(self,rhs:i32)->Self::Output {
Angle32(self.0.wrapping_mul(rhs))
}
}
impl std::ops::Mul<Angle32> for Angle32{
type Output=Angle32;
#[inline]
fn mul(self,rhs:Self)->Self::Output {
Angle32(self.0.wrapping_mul(rhs.0))
}
}
#[test]
fn angle_sin_cos(){
fn close_enough(lhs:Planar64,rhs:Planar64)->bool{
(lhs-rhs).abs()<Planar64::EPSILON*4
}
fn test_angle(f:f64){
let a=Angle32((f/Angle32::ANGLE32_TO_FLOAT64_RADIANS) as i32);
println!("a={:#034b}",a.0);
let (c,s)=a.cos_sin();
let h=(s*s+c*c).sqrt();
println!("cordic s={} c={}",(s/h).divide(),(c/h).divide());
let (fs,fc)=f.sin_cos();
println!("float s={} c={}",fs,fc);
assert!(close_enough((c/h).divide().fix_1(),Planar64::raw((fc*((1u64<<32) as f64)) as i64)));
assert!(close_enough((s/h).divide().fix_1(),Planar64::raw((fs*((1u64<<32) as f64)) as i64)));
}
test_angle(1.0);
test_angle(std::f64::consts::PI/4.0);
test_angle(std::f64::consts::PI/8.0);
}
/* Unit type unused for now, may revive it for map files
///[-1.0,1.0] = [-2^30,2^30]
pub struct Unit32(i32);
impl Unit32{
#[inline]
pub fn as_planar64(&self) -> Planar64{
Planar64(4*(self.0 as i64))
}
}
const UNIT32_ONE_FLOAT64=((1<<30) as f64);
///[-1.0,1.0] = [-2^30,2^30]
pub struct Unit32Vec3(glam::IVec3);
impl TryFrom<[f32;3]> for Unit32Vec3{
type Error=Unit32TryFromFloatError;
fn try_from(value:[f32;3])->Result<Self,Self::Error>{
Ok(Self(glam::ivec3(
Unit32::try_from(Planar64::try_from(value[0])?)?.0,
Unit32::try_from(Planar64::try_from(value[1])?)?.0,
Unit32::try_from(Planar64::try_from(value[2])?)?.0,
)))
}
}
*/
pub type Planar64TryFromFloatError=fixed_wide::fixed::FixedFromFloatError;
pub type Planar64=fixed_wide::types::I32F32;
pub type Planar64Vec3=linear_ops::types::Vector3<Planar64>;
pub type Planar64Mat3=linear_ops::types::Matrix3<Planar64>;
pub mod vec3{
use super::*;
pub use linear_ops::types::Vector3;
pub const MIN:Planar64Vec3=Planar64Vec3::new([Planar64::MIN;3]);
pub const MAX:Planar64Vec3=Planar64Vec3::new([Planar64::MAX;3]);
pub const ZERO:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO;3]);
pub const ZERO_2:linear_ops::types::Vector3<Fixed::<2,64>>=linear_ops::types::Vector3::new([Fixed::<2,64>::ZERO;3]);
pub const X:Planar64Vec3=Planar64Vec3::new([Planar64::ONE,Planar64::ZERO,Planar64::ZERO]);
pub const Y:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ONE,Planar64::ZERO]);
pub const Z:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ZERO,Planar64::ONE]);
pub const ONE:Planar64Vec3=Planar64Vec3::new([Planar64::ONE,Planar64::ONE,Planar64::ONE]);
pub const NEG_X:Planar64Vec3=Planar64Vec3::new([Planar64::NEG_ONE,Planar64::ZERO,Planar64::ZERO]);
pub const NEG_Y:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::NEG_ONE,Planar64::ZERO]);
pub const NEG_Z:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ZERO,Planar64::NEG_ONE]);
pub const NEG_ONE:Planar64Vec3=Planar64Vec3::new([Planar64::NEG_ONE,Planar64::NEG_ONE,Planar64::NEG_ONE]);
#[inline]
pub const fn int(x:i32,y:i32,z:i32)->Planar64Vec3{
Planar64Vec3::new([Planar64::raw((x as i64)<<32),Planar64::raw((y as i64)<<32),Planar64::raw((z as i64)<<32)])
}
#[inline]
pub fn raw_array(array:[i64;3])->Planar64Vec3{
Planar64Vec3::new(array.map(Planar64::raw))
}
#[inline]
pub fn raw_xyz(x:i64,y:i64,z:i64)->Planar64Vec3{
Planar64Vec3::new([Planar64::raw(x),Planar64::raw(y),Planar64::raw(z)])
}
#[inline]
pub fn try_from_f32_array([x,y,z]:[f32;3])->Result<Planar64Vec3,Planar64TryFromFloatError>{
Ok(Planar64Vec3::new([
try_from_f32(x)?,
try_from_f32(y)?,
try_from_f32(z)?,
]))
}
}
#[inline]
pub fn int(value:i32)->Planar64{
Planar64::from(value)
}
#[inline]
pub fn try_from_f32(value:f32)->Result<Planar64,Planar64TryFromFloatError>{
let result:Result<Planar64,_>=value.try_into();
match result{
Ok(ok)=>Ok(ok),
Err(e)=>e.underflow_to_zero(),
}
}
pub mod mat3{
use super::*;
pub use linear_ops::types::Matrix3;
#[inline]
pub const fn identity()->Planar64Mat3{
Planar64Mat3::new([
[Planar64::ONE,Planar64::ZERO,Planar64::ZERO],
[Planar64::ZERO,Planar64::ONE,Planar64::ZERO],
[Planar64::ZERO,Planar64::ZERO,Planar64::ONE],
])
}
#[inline]
pub fn from_diagonal(diag:Planar64Vec3)->Planar64Mat3{
Planar64Mat3::new([
[diag.x,Planar64::ZERO,Planar64::ZERO],
[Planar64::ZERO,diag.y,Planar64::ZERO],
[Planar64::ZERO,Planar64::ZERO,diag.z],
])
}
#[inline]
pub fn from_rotation_yx(x:Angle32,y:Angle32)->Planar64Mat3{
let (xc,xs)=x.cos_sin();
let (yc,ys)=y.cos_sin();
Planar64Mat3::from_cols([
Planar64Vec3::new([xc,Planar64::ZERO,-xs]),
Planar64Vec3::new([(xs*ys).fix_1(),yc,(xc*ys).fix_1()]),
Planar64Vec3::new([(xs*yc).fix_1(),-ys,(xc*yc).fix_1()]),
])
}
#[inline]
pub fn from_rotation_y(y:Angle32)->Planar64Mat3{
let (c,s)=y.cos_sin();
Planar64Mat3::from_cols([
Planar64Vec3::new([c,Planar64::ZERO,-s]),
vec3::Y,
Planar64Vec3::new([s,Planar64::ZERO,c]),
])
}
#[inline]
pub fn try_from_f32_array_2d([x_axis,y_axis,z_axis]:[[f32;3];3])->Result<Planar64Mat3,Planar64TryFromFloatError>{
Ok(Planar64Mat3::new([
vec3::try_from_f32_array(x_axis)?.to_array(),
vec3::try_from_f32_array(y_axis)?.to_array(),
vec3::try_from_f32_array(z_axis)?.to_array(),
]))
}
}
#[derive(Clone,Copy,Default,Hash,Eq,PartialEq)]
pub struct Planar64Affine3{
pub matrix3:Planar64Mat3,//includes scale above 1
pub translation:Planar64Vec3,
}
impl Planar64Affine3{
#[inline]
pub const fn new(matrix3:Planar64Mat3,translation:Planar64Vec3)->Self{
Self{matrix3,translation}
}
#[inline]
pub fn transform_point3(&self,point:Planar64Vec3)->vec3::Vector3<Fixed<2,64>>{
self.translation.fix_2()+self.matrix3*point
}
}
impl Into<glam::Mat4> for Planar64Affine3{
#[inline]
fn into(self)->glam::Mat4{
let matrix3=self.matrix3.to_array().map(|row|row.map(Into::<f32>::into));
let translation=self.translation.to_array().map(Into::<f32>::into);
glam::Mat4::from_cols_array(&[
matrix3[0][0],matrix3[0][1],matrix3[0][2],0.0,
matrix3[1][0],matrix3[1][1],matrix3[1][2],0.0,
matrix3[2][0],matrix3[2][1],matrix3[2][2],0.0,
translation[0],translation[1],translation[2],1.0
])
}
}
#[test]
fn test_sqrt(){
let r=int(400);
assert_eq!(r,Planar64::raw(1717986918400));
let s=r.sqrt();
assert_eq!(s,Planar64::raw(85899345920));
}
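
Illustrative usage (not part of the original file) of the integer primitives defined above, assuming the crate is consumed as `strafesnet_common`; the specific values are arbitrary.

use strafesnet_common::integer::{int,vec3,AbsoluteTime,Ratio64};

fn integer_examples(){
	//Ratio64 reduces on construction: 100 ticks per 1_000_000_000 nanoseconds
	let tick_rate=Ratio64::new(100,1_000_000_000).unwrap();
	//rhs_div_int maps a tick index back to nanoseconds
	assert_eq!(tick_rate.rhs_div_int(1),10_000_000);
	//AbsoluteTime is a thin wrapper over i64 nanoseconds
	let t=AbsoluteTime::from_millis(1500);
	assert_eq!(t.nanos(),1_500_000_000);
	//Planar64 is I32F32 fixed point; vec3::int builds whole-number vectors
	let v=vec3::int(1,2,3);
	assert_eq!(v.y,int(2));
}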

View File

@ -1,16 +0,0 @@
pub mod bvh;
pub mod map;
pub mod run;
pub mod aabb;
pub mod model;
pub mod mouse;
pub mod timer;
pub mod integer;
pub mod physics;
pub mod session;
pub mod updatable;
pub mod instruction;
pub mod gameplay_attributes;
pub mod gameplay_modes;
pub mod gameplay_style;
pub mod controls_bitflag;

View File

@ -1,14 +0,0 @@
use crate::model;
use crate::gameplay_modes;
use crate::gameplay_attributes;
//this is a temporary struct to try to get the code running again
//TODO: use snf::map::Region to update the data in physics and graphics instead of this
pub struct CompleteMap{
pub modes:gameplay_modes::Modes,
pub attributes:Vec<gameplay_attributes::CollisionAttributes>,
pub meshes:Vec<model::Mesh>,
pub models:Vec<model::Model>,
//RenderPattern
pub textures:Vec<Vec<u8>>,
pub render_configs:Vec<model::RenderConfig>,
}

View File

@ -1,133 +0,0 @@
use crate::integer::{Planar64Vec3,Planar64Affine3};
use crate::gameplay_attributes;
pub type TextureCoordinate=glam::Vec2;
pub type Color4=glam::Vec4;
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct PositionId(u32);
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct TextureCoordinateId(u32);
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct NormalId(u32);
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct ColorId(u32);
#[derive(Clone,Hash,PartialEq,Eq)]
pub struct IndexedVertex{
pub pos:PositionId,
pub tex:TextureCoordinateId,
pub normal:NormalId,
pub color:ColorId,
}
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct VertexId(u32);
pub type IndexedVertexList=Vec<VertexId>;
pub trait PolygonIter{
fn polys(&self)->impl Iterator<Item=&[VertexId]>;
}
pub trait MapVertexId{
fn map_vertex_id<F:Fn(VertexId)->VertexId>(self,f:F)->Self;
}
#[derive(Clone)]
pub struct PolygonList(Vec<IndexedVertexList>);
impl PolygonList{
pub const fn new(list:Vec<IndexedVertexList>)->Self{
Self(list)
}
pub fn extend<T:IntoIterator<Item=IndexedVertexList>>(&mut self,iter:T){
self.0.extend(iter);
}
}
impl PolygonIter for PolygonList{
fn polys(&self)->impl Iterator<Item=&[VertexId]>{
self.0.iter().map(|poly|poly.as_slice())
}
}
impl MapVertexId for PolygonList{
fn map_vertex_id<F:Fn(VertexId)->VertexId>(self,f:F)->Self{
Self(self.0.into_iter().map(|ivl|ivl.into_iter().map(&f).collect()).collect())
}
}
// pub struct TriangleStrip(IndexedVertexList);
// impl PolygonIter for TriangleStrip{
// fn polys(&self)->impl Iterator<Item=&[VertexId]>{
// self.0.vertices.windows(3).enumerate().map(|(i,s)|if i&0!=0{return s.iter().rev()}else{return s.iter()})
// }
// }
#[derive(Clone,Copy,Hash,id::Id,PartialEq,Eq)]
pub struct PolygonGroupId(u32);
#[derive(Clone)]
pub enum PolygonGroup{
PolygonList(PolygonList),
//TriangleStrip(TriangleStrip),
}
impl PolygonIter for PolygonGroup{
fn polys(&self)->impl Iterator<Item=&[VertexId]>{
match self{
PolygonGroup::PolygonList(list)=>list.polys(),
//PolygonGroup::TriangleStrip(strip)=>strip.polys(),
}
}
}
impl MapVertexId for PolygonGroup{
fn map_vertex_id<F:Fn(VertexId)->VertexId>(self,f:F)->Self{
match self{
PolygonGroup::PolygonList(polys)=>Self::PolygonList(polys.map_vertex_id(f)),
}
}
}
/// Ah yes, a group of things to render at the same time
#[derive(Clone,Copy,Debug,Hash,id::Id,Eq,PartialEq)]
pub struct TextureId(u32);
#[derive(Clone,Copy,Hash,id::Id,Eq,PartialEq)]
pub struct RenderConfigId(u32);
#[derive(Clone,Copy,Default)]
pub struct RenderConfig{
pub texture:Option<TextureId>,
}
impl RenderConfig{
pub const fn texture(texture:TextureId)->Self{
Self{
texture:Some(texture),
}
}
}
#[derive(Clone)]
pub struct IndexedGraphicsGroup{
//Render pattern material/texture/shader/flat color
pub render:RenderConfigId,
pub groups:Vec<PolygonGroupId>,
}
#[derive(Clone,Default)]
pub struct IndexedPhysicsGroup{
//the polygons in this group are guaranteed to make a closed convex shape
pub groups:Vec<PolygonGroupId>,
}
//This is a superset of PhysicsModel and GraphicsModel
#[derive(Clone,Copy,Debug,Hash,id::Id,Eq,PartialEq)]
pub struct MeshId(u32);
#[derive(Clone)]
pub struct Mesh{
pub unique_pos:Vec<Planar64Vec3>,//Unit32Vec3
pub unique_normal:Vec<Planar64Vec3>,//Unit32Vec3
pub unique_tex:Vec<TextureCoordinate>,
pub unique_color:Vec<Color4>,
pub unique_vertices:Vec<IndexedVertex>,
//polygon groups are constant texture AND convexity slices
//note that this may need to be changed to be a list of individual faces
//for submeshes to work since face ids need to be consistent across submeshes
//so face == polygon_groups[face_id]
pub polygon_groups:Vec<PolygonGroup>,
//graphics indexed (by texture)
pub graphics_groups:Vec<IndexedGraphicsGroup>,
//physics indexed (by convexity)
pub physics_groups:Vec<IndexedPhysicsGroup>,
}
#[derive(Debug,Clone,Copy,Hash,id::Id,Eq,PartialEq)]
pub struct ModelId(u32);
pub struct Model{
pub mesh:MeshId,
pub attributes:gameplay_attributes::CollisionAttributesId,
pub color:Color4,//transparency is in here
pub transform:Planar64Affine3,
}
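
A short sketch (not from the original file) of the PolygonIter abstraction: every polygon group yields its faces as slices of VertexId regardless of the underlying representation. It assumes the `id::Id` derive provides `VertexId::new` (it may instead require importing the `id::Id` trait), and the fan-triangulation count is only an illustration.

use strafesnet_common::model::{PolygonGroup,PolygonIter,PolygonList,VertexId};

fn count_triangles(group:&PolygonGroup)->usize{
	//a convex polygon with n vertices fans into n-2 triangles
	group.polys().map(|poly|poly.len().saturating_sub(2)).sum()
}

fn quad_example()->usize{
	let quad=vec![VertexId::new(0),VertexId::new(1),VertexId::new(2),VertexId::new(3)];
	let group=PolygonGroup::PolygonList(PolygonList::new(vec![quad]));
	//a single quad makes two triangles
	count_triangles(&group)
}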

View File

@ -1,28 +0,0 @@
use crate::integer::Time;
#[derive(Clone,Debug)]
pub struct MouseState<T>{
pub pos:glam::IVec2,
pub time:Time<T>,
}
impl<T> Default for MouseState<T>{
fn default()->Self{
Self{
time:Time::ZERO,
pos:glam::IVec2::ZERO,
}
}
}
impl<T> MouseState<T>
where Time<T>:Copy,
{
pub fn lerp(&self,target:&MouseState<T>,time:Time<T>)->glam::IVec2{
let m0=self.pos.as_i64vec2();
let m1=target.pos.as_i64vec2();
//these are deltas
let t1t=(target.time-time).nanos();
let tt0=(time-self.time).nanos();
let dt=(target.time-self.time).nanos();
((m0*t1t+m1*tt0)/dt).as_ivec2()
}
}
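
A worked example (not from the original file) of MouseState::lerp, which linearly interpolates between two sampled cursor positions by timestamp: pos(t) = (m0*(t1-t)+m1*(t-t0))/(t1-t0). It assumes the crate is consumed as `strafesnet_common`, that glam is available, and borrows `session::TimeInner` as the time marker.

use strafesnet_common::mouse::MouseState;
use strafesnet_common::integer::Time;
use strafesnet_common::session::TimeInner;

fn lerp_example(){
	let m0=MouseState::<TimeInner>{pos:glam::ivec2(0,0),time:Time::from_millis(0)};
	let m1=MouseState::<TimeInner>{pos:glam::ivec2(100,40),time:Time::from_millis(10)};
	//halfway between the samples lands on the midpoint
	assert_eq!(m0.lerp(&m1,Time::from_millis(5)),glam::ivec2(50,20));
}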

View File

@ -1,31 +0,0 @@
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>;
#[derive(Clone,Debug)]
pub enum Instruction{
ReplaceMouse(crate::mouse::MouseState<TimeInner>,crate::mouse::MouseState<TimeInner>),
SetNextMouse(crate::mouse::MouseState<TimeInner>),
SetMoveRight(bool),
SetMoveUp(bool),
SetMoveBack(bool),
SetMoveLeft(bool),
SetMoveDown(bool),
SetMoveForward(bool),
SetJump(bool),
SetZoom(bool),
/// Reset: fully replace the physics state.
/// This forgets all inputs and settings which need to be reapplied.
Reset,
/// Restart: Teleport to the start zone.
Restart,
/// Spawn: Teleport to a specific mode's spawn
/// Sets current mode & spawn
Spawn(crate::gameplay_modes::ModeId,crate::gameplay_modes::StageId),
Idle,
//Idle: there were no input events, but the simulation is safe to advance to this timestep
//for interpolation / networking / playback reasons, most playback heads will always want
//to be 1 instruction ahead to generate the next state for interpolation.
PracticeFly,
SetSensitivity(crate::integer::Ratio64Vec2),
}

View File

@ -1,113 +0,0 @@
use crate::timer::{TimerFixed,Realtime,Paused,Unpaused};
use crate::physics::{TimeInner as PhysicsTimeInner,Time as PhysicsTime};
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>;
#[derive(Clone,Copy,Debug)]
pub enum FlagReason{
Anticheat,
StyleChange,
Clock,
Pause,
Flying,
Gravity,
Timescale,
TimeTravel,
Teleport,
}
impl ToString for FlagReason{
fn to_string(&self)->String{
self.as_ref().to_owned()
}
}
impl AsRef<str> for FlagReason{
fn as_ref(&self)->&str{
match self{
FlagReason::Anticheat=>"Passed through anticheat zone.",
FlagReason::StyleChange=>"Changed style.",
FlagReason::Clock=>"Incorrect clock. (This can be caused by internet hiccups)",
FlagReason::Pause=>"Pausing is not allowed in this style.",
FlagReason::Flying=>"Flying is not allowed in this style.",
FlagReason::Gravity=>"Gravity modification is not allowed in this style.",
FlagReason::Timescale=>"Timescale is not allowed in this style.",
FlagReason::TimeTravel=>"Time travel is not allowed in this style.",
FlagReason::Teleport=>"Illegal teleport.",
}
}
}
#[derive(Debug)]
pub enum Error{
NotStarted,
AlreadyStarted,
AlreadyFinished,
}
impl std::fmt::Display for Error{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for Error{}
#[derive(Clone,Copy,Debug)]
enum RunState{
Created,
Started{timer:TimerFixed<Realtime<PhysicsTimeInner,TimeInner>,Unpaused>},
Finished{timer:TimerFixed<Realtime<PhysicsTimeInner,TimeInner>,Paused>},
}
#[derive(Clone,Copy,Debug)]
pub struct Run{
state:RunState,
flagged:Option<FlagReason>,
}
impl Run{
pub fn new()->Self{
Self{
state:RunState::Created,
flagged:None,
}
}
pub fn time(&self,time:PhysicsTime)->Time{
match &self.state{
RunState::Created=>Time::ZERO,
RunState::Started{timer}=>timer.time(time),
RunState::Finished{timer}=>timer.time(time),
}
}
pub fn start(&mut self,time:PhysicsTime)->Result<(),Error>{
match &self.state{
RunState::Created=>{
self.state=RunState::Started{
timer:TimerFixed::new(time,Time::ZERO),
};
Ok(())
},
RunState::Started{..}=>Err(Error::AlreadyStarted),
RunState::Finished{..}=>Err(Error::AlreadyFinished),
}
}
pub fn finish(&mut self,time:PhysicsTime)->Result<(),Error>{
//this uses Copy
match &self.state{
RunState::Created=>Err(Error::NotStarted),
RunState::Started{timer}=>{
self.state=RunState::Finished{
timer:timer.into_paused(time),
};
Ok(())
},
RunState::Finished{..}=>Err(Error::AlreadyFinished),
}
}
pub fn flag(&mut self,flag_reason:FlagReason){
//don't replace the first reason the run was flagged
if self.flagged.is_none(){
self.flagged=Some(flag_reason);
}
}
}
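
A usage sketch (not from the original file) for the Run state machine above: started at 2s of physics time and finished at 5s, it reads 3s forever after because the timer is paused on finish. The crate path is assumed to be `strafesnet_common`.

use strafesnet_common::run::{Run,Error};
use strafesnet_common::physics::Time as PhysicsTime;

fn three_second_run()->Result<(),Error>{
	let mut run=Run::new();
	run.start(PhysicsTime::from_secs(2))?;
	run.finish(PhysicsTime::from_secs(5))?;
	//the run timer is paused once finished, so later queries still read 3s
	assert_eq!(run.time(PhysicsTime::from_secs(30)).nanos(),3_000_000_000);
	Ok(())
}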

View File

@ -1,3 +0,0 @@
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>;

View File

@ -1,363 +0,0 @@
use crate::integer::{Time,Ratio64};
#[derive(Clone,Copy,Debug)]
pub struct Paused;
#[derive(Clone,Copy,Debug)]
pub struct Unpaused;
pub trait PauseState:Copy+std::fmt::Debug{
const IS_PAUSED:bool;
fn new()->Self;
}
impl PauseState for Paused{
const IS_PAUSED:bool=true;
fn new()->Self{
Self
}
}
impl PauseState for Unpaused{
const IS_PAUSED:bool=false;
fn new()->Self{
Self
}
}
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
enum Inner{}
type InnerTime=Time<Inner>;
#[derive(Clone,Copy,Debug)]
pub struct Realtime<In,Out>{
offset:InnerTime,
_in:core::marker::PhantomData<In>,
_out:core::marker::PhantomData<Out>,
}
impl<In,Out> Realtime<In,Out>{
pub const fn new(offset:InnerTime)->Self{
Self{
offset,
_in:core::marker::PhantomData,
_out:core::marker::PhantomData,
}
}
}
#[derive(Clone,Copy,Debug)]
pub struct Scaled<In,Out>{
scale:Ratio64,
offset:InnerTime,
_in:core::marker::PhantomData<In>,
_out:core::marker::PhantomData<Out>,
}
impl<In,Out> Scaled<In,Out>
where Time<In>:Copy,
{
pub const fn new(scale:Ratio64,offset:InnerTime)->Self{
Self{
scale,
offset,
_in:core::marker::PhantomData,
_out:core::marker::PhantomData,
}
}
const fn with_scale(scale:Ratio64)->Self{
Self::new(scale,InnerTime::ZERO)
}
const fn scale(&self,time:Time<In>)->InnerTime{
InnerTime::raw(self.scale.mul_int(time.get()))
}
const fn get_scale(&self)->Ratio64{
self.scale
}
fn set_scale(&mut self,time:Time<In>,new_scale:Ratio64){
let new_time=self.get_time(time);
self.scale=new_scale;
self.set_time(time,new_time);
}
}
pub trait TimerState{
type In;
type Out;
fn identity()->Self;
fn get_time(&self,time:Time<Self::In>)->Time<Self::Out>;
fn set_time(&mut self,time:Time<Self::In>,new_time:Time<Self::Out>);
fn get_offset(&self)->InnerTime;
fn set_offset(&mut self,offset:InnerTime);
}
impl<In,Out> TimerState for Realtime<In,Out>{
type In=In;
type Out=Out;
fn identity()->Self{
Self::new(InnerTime::ZERO)
}
fn get_time(&self,time:Time<In>)->Time<Out>{
time.coerce()+self.offset.coerce()
}
fn set_time(&mut self,time:Time<In>,new_time:Time<Out>){
self.offset=new_time.coerce()-time.coerce();
}
fn get_offset(&self)->InnerTime{
self.offset
}
fn set_offset(&mut self,offset:InnerTime){
self.offset=offset;
}
}
impl<In,Out> TimerState for Scaled<In,Out>
where Time<In>:Copy,
{
type In=In;
type Out=Out;
fn identity()->Self{
Self::new(Ratio64::ONE,InnerTime::ZERO)
}
fn get_time(&self,time:Time<In>)->Time<Out>{
(self.scale(time)+self.offset).coerce()
}
fn set_time(&mut self,time:Time<In>,new_time:Time<Out>){
self.offset=new_time.coerce()-self.scale(time);
}
fn get_offset(&self)->InnerTime{
self.offset
}
fn set_offset(&mut self,offset:InnerTime){
self.offset=offset;
}
}
#[derive(Clone,Copy,Debug)]
pub struct TimerFixed<T:TimerState,P:PauseState>{
state:T,
_paused:P,
}
//scaled timer methods are generic across PauseState
impl<P:PauseState,In,Out> TimerFixed<Scaled<In,Out>,P>
where Time<In>:Copy,
{
pub fn scaled(time:Time<In>,new_time:Time<Out>,scale:Ratio64)->Self{
let mut timer=Self{
state:Scaled::with_scale(scale),
_paused:P::new(),
};
timer.set_time(time,new_time);
timer
}
pub const fn get_scale(&self)->Ratio64{
self.state.get_scale()
}
pub fn set_scale(&mut self,time:Time<In>,new_scale:Ratio64){
self.state.set_scale(time,new_scale)
}
}
//pause and unpause is generic across TimerState
impl<T:TimerState> TimerFixed<T,Paused>
where Time<T::In>:Copy,
{
pub fn into_unpaused(self,time:Time<T::In>)->TimerFixed<T,Unpaused>{
let new_time=self.time(time);
let mut timer=TimerFixed{
state:self.state,
_paused:Unpaused,
};
timer.set_time(time,new_time);
timer
}
}
impl<T:TimerState> TimerFixed<T,Unpaused>
where Time<T::In>:Copy,
{
pub fn into_paused(self,time:Time<T::In>)->TimerFixed<T,Paused>{
let new_time=self.time(time);
let mut timer=TimerFixed{
state:self.state,
_paused:Paused,
};
timer.set_time(time,new_time);
timer
}
}
//the new constructor and time queries are generic across both
impl<T:TimerState,P:PauseState> TimerFixed<T,P>{
pub fn new(time:Time<T::In>,new_time:Time<T::Out>)->Self{
let mut timer=Self{
state:T::identity(),
_paused:P::new(),
};
timer.set_time(time,new_time);
timer
}
pub fn from_state(state:T)->Self{
Self{
state,
_paused:P::new(),
}
}
pub fn into_state(self)->T{
self.state
}
pub fn time(&self,time:Time<T::In>)->Time<T::Out>{
match P::IS_PAUSED{
true=>self.state.get_offset().coerce(),
false=>self.state.get_time(time),
}
}
pub fn set_time(&mut self,time:Time<T::In>,new_time:Time<T::Out>){
match P::IS_PAUSED{
true=>self.state.set_offset(new_time.coerce()),
false=>self.state.set_time(time,new_time),
}
}
}
#[derive(Debug)]
pub enum Error{
AlreadyPaused,
AlreadyUnpaused,
}
impl std::fmt::Display for Error{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for Error{}
//wrapper type which holds type state internally
#[derive(Clone,Debug)]
pub enum Timer<T:TimerState>{
Paused(TimerFixed<T,Paused>),
Unpaused(TimerFixed<T,Unpaused>),
}
impl<T:TimerState> Timer<T>
where
T:Copy,
Time<T::In>:Copy,
{
pub fn from_state(state:T,paused:bool)->Self{
match paused{
true=>Self::Paused(TimerFixed::from_state(state)),
false=>Self::Unpaused(TimerFixed::from_state(state)),
}
}
pub fn into_state(self)->(T,bool){
match self{
Self::Paused(timer)=>(timer.into_state(),true),
Self::Unpaused(timer)=>(timer.into_state(),false),
}
}
pub fn paused(time:Time<T::In>,new_time:Time<T::Out>)->Self{
Self::Paused(TimerFixed::new(time,new_time))
}
pub fn unpaused(time:Time<T::In>,new_time:Time<T::Out>)->Self{
Self::Unpaused(TimerFixed::new(time,new_time))
}
pub fn time(&self,time:Time<T::In>)->Time<T::Out>{
match self{
Self::Paused(timer)=>timer.time(time),
Self::Unpaused(timer)=>timer.time(time),
}
}
pub fn set_time(&mut self,time:Time<T::In>,new_time:Time<T::Out>){
match self{
Self::Paused(timer)=>timer.set_time(time,new_time),
Self::Unpaused(timer)=>timer.set_time(time,new_time),
}
}
pub fn pause(&mut self,time:Time<T::In>)->Result<(),Error>{
*self=match *self{
Self::Paused(_)=>return Err(Error::AlreadyPaused),
Self::Unpaused(timer)=>Self::Paused(timer.into_paused(time)),
};
Ok(())
}
pub fn unpause(&mut self,time:Time<T::In>)->Result<(),Error>{
*self=match *self{
Self::Paused(timer)=>Self::Unpaused(timer.into_unpaused(time)),
Self::Unpaused(_)=>return Err(Error::AlreadyUnpaused),
};
Ok(())
}
pub fn is_paused(&self)->bool{
match self{
Self::Paused(_)=>true,
Self::Unpaused(_)=>false,
}
}
pub fn set_paused(&mut self,time:Time<T::In>,paused:bool)->Result<(),Error>{
match paused{
true=>self.pause(time),
false=>self.unpause(time),
}
}
}
//scaled timer methods are generic across PauseState
impl<In,Out> Timer<Scaled<In,Out>>
where Time<In>:Copy,
{
pub const fn get_scale(&self)->Ratio64{
match self{
Self::Paused(timer)=>timer.get_scale(),
Self::Unpaused(timer)=>timer.get_scale(),
}
}
pub fn set_scale(&mut self,time:Time<In>,new_scale:Ratio64){
match self{
Self::Paused(timer)=>timer.set_scale(time,new_scale),
Self::Unpaused(timer)=>timer.set_scale(time,new_scale),
}
}
}
#[cfg(test)]
mod test{
use super::*;
macro_rules! sec {
($s: expr) => {
Time::from_secs($s)
};
}
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
enum Parent{}
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
enum Calculated{}
#[test]
fn test_timerfixed_scaled(){
//create a paused timer that reads 0s
let timer=TimerFixed::<Scaled<Parent,Calculated>,Paused>::from_state(Scaled::new(0.5f32.try_into().unwrap(),sec!(0)));
//the paused timer at 1 second should read 0s
assert_eq!(timer.time(sec!(1)),sec!(0));
//unpause it after one second
let timer=timer.into_unpaused(sec!(1));
//the timer at 6 seconds should read 2.5s
assert_eq!(timer.time(sec!(6)),Time::from_millis(2500));
//pause the timer after 11 seconds
let timer=timer.into_paused(sec!(11));
//the paused timer at 20 seconds should read 5s
assert_eq!(timer.time(sec!(20)),sec!(5));
}
#[test]
fn test_timer()->Result<(),Error>{
//create a paused timer that reads 0s
let mut timer=Timer::<Realtime<Parent,Calculated>>::paused(sec!(0),sec!(0));
//the paused timer at 1 second should read 0s
assert_eq!(timer.time(sec!(1)),sec!(0));
//unpause it after one second
timer.unpause(sec!(1))?;
//the timer at 6 seconds should read 5s
assert_eq!(timer.time(sec!(6)),sec!(5));
//pause the timer after 11 seconds
timer.pause(sec!(11))?;
//the paused timer at 20 seconds should read 10s
assert_eq!(timer.time(sec!(20)),sec!(10));
Ok(())
}
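//hedged sketch, not part of the original tests: exercises the `scaled`
//constructor above, reusing the Parent/Calculated markers and the sec! macro
//from this module; the 0.5 scale is an arbitrary example value
#[test]
fn test_timerfixed_scaled_constructor(){
//create an unpaused half-speed timer anchored to read 0s at 0s
let timer=TimerFixed::<Scaled<Parent,Calculated>,Unpaused>::scaled(sec!(0),sec!(0),0.5f32.try_into().unwrap());
//at 6 seconds it should read 3s
assert_eq!(timer.time(sec!(6)),sec!(3));
//pause at 10 seconds, freezing the reading at 5s
let timer=timer.into_paused(sec!(10));
//the paused timer at 20 seconds should still read 5s
assert_eq!(timer.time(sec!(20)),sec!(5));
}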
}

View File

@ -1,56 +0,0 @@
pub trait Updatable<Updater>{
fn update(&mut self,update:Updater);
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
struct InnerId(u32);
#[derive(Clone)]
struct Inner{
id:InnerId,
enabled:bool,
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
struct OuterId(u32);
struct Outer{
id:OuterId,
inners:std::collections::HashMap<InnerId,Inner>,
}
enum Update<I,U>{
Insert(I),
Update(U),
Remove
}
struct InnerUpdate{
//#[updatable(Update)]
enabled:Option<bool>,
}
struct OuterUpdate{
//#[updatable(Insert,Update,Remove)]
inners:std::collections::HashMap<InnerId,Update<Inner,InnerUpdate>>,
//#[updatable(Update)]
//inners:std::collections::HashMap<InnerId,InnerUpdate>,
}
impl Updatable<InnerUpdate> for Inner{
fn update(&mut self,update:InnerUpdate){
if let Some(enabled)=update.enabled{
self.enabled=enabled;
}
}
}
impl Updatable<OuterUpdate> for Outer{
fn update(&mut self,update:OuterUpdate){
for (id,up) in update.inners{
match up{
Update::Insert(new_inner)=>self.inners.insert(id,new_inner),
Update::Update(inner_update)=>self.inners.get_mut(&id).map(|inner|{
let old=inner.clone();
inner.update(inner_update);
old
}),
Update::Remove=>self.inners.remove(&id),
};
}
}
}
//*/
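//hedged usage sketch, not part of the original file: applies one Insert and
//one Update to an Outer holding a single Inner; the ids are arbitrary
#[cfg(test)]
mod update_sketch{
use super::*;
#[test]
fn outer_update(){
let inner=Inner{id:InnerId(1),enabled:false};
let mut outer=Outer{id:OuterId(0),inners:std::collections::HashMap::from([(InnerId(1),inner)])};
let mut inners=std::collections::HashMap::new();
//toggle the existing inner on
inners.insert(InnerId(1),Update::Update(InnerUpdate{enabled:Some(true)}));
//insert a brand new inner
inners.insert(InnerId(2),Update::Insert(Inner{id:InnerId(2),enabled:true}));
outer.update(OuterUpdate{inners});
assert!(outer.inners[&InnerId(1)].enabled);
assert!(outer.inners.contains_key(&InnerId(2)));
}
}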

View File

@ -1 +0,0 @@
/target

View File

@ -1,21 +0,0 @@
[package]
name = "strafesnet_deferred_loader"
version = "0.4.1"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Acquire IDs for objects before loading them in bulk."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["legacy"]
legacy = ["dep:url","dep:vbsp"]
#roblox = ["dep:lazy-regex"]
#source = ["dep:vbsp"]
[dependencies]
strafesnet_common = { path = "../common", registry = "strafesnet" }
url = { version = "2.5.2", optional = true }
vbsp = { version = "0.6.0", optional = true }

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,19 +0,0 @@
Texture Loader
==============
## Texture loader, designed to be used in conjunction with rbx_loader, bsp_loader or Strafe Client
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>

View File

@ -1,34 +0,0 @@
#[cfg(feature="legacy")]
mod roblox_legacy;
#[cfg(feature="legacy")]
mod source_legacy;
#[cfg(feature="roblox")]
mod roblox;
#[cfg(feature="source")]
mod source;
#[cfg(any(feature="roblox",feature="legacy"))]
pub mod rbxassetid;
pub mod texture;
#[cfg(any(feature="source",feature="legacy"))]
pub mod valve_mesh;
#[cfg(any(feature="roblox",feature="legacy"))]
pub mod roblox_mesh;
#[cfg(feature="legacy")]
pub fn roblox_legacy()->roblox_legacy::Loader{
roblox_legacy::Loader::new()
}
#[cfg(feature="legacy")]
pub fn source_legacy()->source_legacy::Loader{
source_legacy::Loader::new()
}
#[cfg(feature="roblox")]
pub fn roblox()->roblox::Loader{
roblox::Loader::new()
}
#[cfg(feature="source")]
pub fn source()->source::Loader{
source::Loader::new()
}

View File

@ -1,48 +0,0 @@
#[derive(Hash,Eq,PartialEq)]
pub struct RobloxAssetId(pub u64);
#[derive(Debug)]
#[allow(dead_code)]
pub struct StringWithError{
string:String,
error:RobloxAssetIdParseErr,
}
impl std::fmt::Display for StringWithError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for StringWithError{}
impl StringWithError{
const fn new(
string:String,
error:RobloxAssetIdParseErr,
)->Self{
Self{string,error}
}
}
#[derive(Debug)]
pub enum RobloxAssetIdParseErr{
Url(url::ParseError),
UnknownScheme,
ParseInt(std::num::ParseIntError),
MissingAssetId,
}
impl std::str::FromStr for RobloxAssetId{
type Err=StringWithError;
fn from_str(s:&str)->Result<Self,Self::Err>{
let url=url::Url::parse(s).map_err(|e|StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::Url(e)))?;
let parsed_asset_id=match url.scheme(){
"rbxassetid"=>url.domain().ok_or_else(||StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::MissingAssetId))?.parse(),
"http"|"https"=>{
let (_,asset_id)=url.query_pairs()
.find(|(id,_)|match id.as_ref(){
"ID"|"id"|"Id"|"iD"=>true,
_=>false,
}).ok_or_else(||StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::MissingAssetId))?;
asset_id.parse()
},
_=>Err(StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::UnknownScheme))?,
};
Ok(Self(parsed_asset_id.map_err(|e|StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::ParseInt(e)))?))
}
}
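//hedged usage sketch, not part of the original file: the asset id 255299419
//is an arbitrary example value
#[cfg(test)]
mod parse_sketch{
use super::RobloxAssetId;
#[test]
fn parse_from_url(){
//the id can come from an http(s) url query parameter
let id:RobloxAssetId="http://www.roblox.com/asset/?id=255299419".parse().unwrap();
assert_eq!(id.0,255299419);
//unsupported schemes are rejected rather than guessed at
assert!("ftp://example.com/asset".parse::<RobloxAssetId>().is_err());
}
}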

View File

@ -1,112 +0,0 @@
use std::io::Read;
use std::collections::HashMap;
use crate::roblox_mesh;
use crate::texture::{RenderConfigs,Texture};
use strafesnet_common::model::{MeshId,RenderConfig,RenderConfigId,TextureId};
use crate::rbxassetid::RobloxAssetId;
#[derive(Default)]
pub struct RenderConfigLoader{
texture_count:u32,
render_configs:Vec<RenderConfig>,
render_config_id_from_asset_id:HashMap<Option<RobloxAssetId>,RenderConfigId>,
}
impl RenderConfigLoader{
pub fn acquire_render_config_id(&mut self,name:Option<&str>)->RenderConfigId{
let render_id=RenderConfigId::new(self.render_config_id_from_asset_id.len() as u32);
let index=name.and_then(|name|{
match name.parse::<RobloxAssetId>(){
Ok(asset_id)=>Some(asset_id),
Err(e)=>{
println!("Failed to parse AssetId: {e}");
None
},
}
});
*self.render_config_id_from_asset_id.entry(index).or_insert_with(||{
//create the render config.
let render_config=if name.is_some(){
let render_config=RenderConfig::texture(TextureId::new(self.texture_count));
self.texture_count+=1;
render_config
}else{
RenderConfig::default()
};
self.render_configs.push(render_config);
render_id
})
}
}
#[derive(Default)]
pub struct MeshLoader{
mesh_id_from_asset_id:HashMap<Option<RobloxAssetId>,MeshId>,
}
impl MeshLoader{
pub fn acquire_mesh_id(&mut self,name:&str)->MeshId{
let mesh_id=MeshId::new(self.mesh_id_from_asset_id.len() as u32);
let index=match name.parse::<RobloxAssetId>(){
Ok(asset_id)=>Some(asset_id),
Err(e)=>{
println!("Failed to parse AssetId: {e}");
None
},
};
*self.mesh_id_from_asset_id.entry(index).or_insert(mesh_id)
}
pub fn load_meshes(&mut self)->Result<roblox_mesh::Meshes,std::io::Error>{
let mut mesh_data=vec![None;self.mesh_id_from_asset_id.len()];
for (asset_id_option,mesh_id) in &self.mesh_id_from_asset_id{
if let Some(asset_id)=asset_id_option{
if let Ok(mut file)=std::fs::File::open(format!("meshes/{}",asset_id.0)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
mesh_data[mesh_id.get() as usize]=Some(roblox_mesh::RobloxMeshData::new(data));
}else{
println!("[roblox_legacy] no mesh name={}",asset_id.0);
}
}
}
Ok(roblox_mesh::Meshes::new(mesh_data))
}
}
pub struct Loader{
render_config_loader:RenderConfigLoader,
mesh_loader:MeshLoader,
}
impl Loader{
pub fn new()->Self{
Self{
render_config_loader:RenderConfigLoader::default(),
mesh_loader:MeshLoader::default(),
}
}
pub fn get_inner_mut(&mut self)->(&mut RenderConfigLoader,&mut MeshLoader){
(&mut self.render_config_loader,&mut self.mesh_loader)
}
pub fn into_render_configs(mut self)->Result<RenderConfigs,std::io::Error>{
let mut sorted_textures=vec![None;self.render_config_loader.texture_count as usize];
for (asset_id_option,render_config_id) in self.render_config_loader.render_config_id_from_asset_id{
let render_config=self.render_config_loader.render_configs.get_mut(render_config_id.get() as usize).unwrap();
if let (Some(asset_id),Some(texture_id))=(asset_id_option,render_config.texture){
if let Ok(mut file)=std::fs::File::open(format!("textures/{}.dds",asset_id.0)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
sorted_textures[texture_id.get() as usize]=Some(Texture::ImageDDS(data));
}else{
//texture failed to load
render_config.texture=None;
}
}
}
Ok(RenderConfigs::new(
sorted_textures,
self.render_config_loader.render_configs,
))
}
}
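//hedged usage sketch, not part of the original file: the intended
//acquire-then-load flow; the asset url is an arbitrary example and the
//corresponding files are expected under textures/ and meshes/
#[allow(dead_code)]
fn example_acquire_then_load()->Result<(RenderConfigs,roblox_mesh::Meshes),std::io::Error>{
let mut loader=Loader::new();
//first pass: hand out ids while walking the map
let (render_configs,meshes)=loader.get_inner_mut();
let _render_config_id=render_configs.acquire_render_config_id(Some("http://www.roblox.com/asset/?id=255299419"));
let _mesh_id=meshes.acquire_mesh_id("http://www.roblox.com/asset/?id=255299419");
//second pass: load everything that was referenced, in bulk
let mesh_data=loader.get_inner_mut().1.load_meshes()?;
let configs=loader.into_render_configs()?;
Ok((configs,mesh_data))
}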

View File

@ -1,30 +0,0 @@
use strafesnet_common::model::MeshId;
#[derive(Clone)]
pub struct RobloxMeshData(Vec<u8>);
impl RobloxMeshData{
pub(crate) fn new(data:Vec<u8>)->Self{
Self(data)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
pub struct Meshes{
meshes:Vec<Option<RobloxMeshData>>,
}
impl Meshes{
pub(crate) const fn new(meshes:Vec<Option<RobloxMeshData>>)->Self{
Self{
meshes,
}
}
pub fn get_texture(&self,texture_id:MeshId)->Option<&RobloxMeshData>{
self.meshes.get(texture_id.get() as usize)?.as_ref()
}
pub fn into_iter(self)->impl Iterator<Item=(MeshId,RobloxMeshData)>{
self.meshes.into_iter().enumerate().filter_map(|(mesh_id,maybe_mesh)|
maybe_mesh.map(|mesh|(MeshId::new(mesh_id as u32),mesh))
)
}
}

View File

@ -1,102 +0,0 @@
use std::io::Read;
use std::collections::HashMap;
use crate::valve_mesh;
use crate::texture::{Texture,RenderConfigs};
use strafesnet_common::model::{MeshId,TextureId,RenderConfig,RenderConfigId};
pub struct RenderConfigLoader{
texture_count:u32,
render_configs:Vec<RenderConfig>,
texture_paths:HashMap<Option<Box<str>>,RenderConfigId>,
}
impl RenderConfigLoader{
pub fn acquire_render_config_id(&mut self,name:Option<&str>)->RenderConfigId{
let render_id=RenderConfigId::new(self.texture_paths.len() as u32);
*self.texture_paths.entry(name.map(Into::into)).or_insert_with(||{
//create the render config.
let render_config=if name.is_some(){
let render_config=RenderConfig::texture(TextureId::new(self.texture_count));
self.texture_count+=1;
render_config
}else{
RenderConfig::default()
};
self.render_configs.push(render_config);
render_id
})
}
}
pub struct MeshLoader{
mesh_paths:HashMap<Box<str>,MeshId>,
}
impl MeshLoader{
pub fn acquire_mesh_id(&mut self,name:&str)->MeshId{
let mesh_id=MeshId::new(self.mesh_paths.len() as u32);
*self.mesh_paths.entry(name.into()).or_insert(mesh_id)
}
//load_meshes should look like load_textures
pub fn load_meshes(&mut self,bsp:&vbsp::Bsp)->valve_mesh::Meshes{
let mut mesh_data=vec![None;self.mesh_paths.len()];
for (mesh_path,mesh_id) in &self.mesh_paths{
let mesh_path_lower=mesh_path.to_lowercase();
//.mdl, .vvd, .dx90.vtx
let path=std::path::PathBuf::from(mesh_path_lower.as_str());
let mut vvd_path=path.clone();
let mut vtx_path=path.clone();
vvd_path.set_extension("vvd");
vtx_path.set_extension("dx90.vtx");
match (bsp.pack.get(mesh_path_lower.as_str()),bsp.pack.get(vvd_path.as_os_str().to_str().unwrap()),bsp.pack.get(vtx_path.as_os_str().to_str().unwrap())){
(Ok(Some(mdl_file)),Ok(Some(vvd_file)),Ok(Some(vtx_file)))=>{
mesh_data[mesh_id.get() as usize]=Some(valve_mesh::ModelData{
mdl:valve_mesh::MdlData::new(mdl_file),
vtx:valve_mesh::VtxData::new(vtx_file),
vvd:valve_mesh::VvdData::new(vvd_file),
});
},
_=>println!("no model name={}",mesh_path),
}
}
valve_mesh::Meshes::new(mesh_data)
}
}
pub struct Loader{
render_config_loader:RenderConfigLoader,
mesh_loader:MeshLoader,
}
impl Loader{
pub fn new()->Self{
Self{
render_config_loader:RenderConfigLoader{
texture_count:0,
texture_paths:HashMap::new(),
render_configs:Vec::new(),
},
mesh_loader:MeshLoader{mesh_paths:HashMap::new()},
}
}
pub fn get_inner_mut(&mut self)->(&mut RenderConfigLoader,&mut MeshLoader){
(&mut self.render_config_loader,&mut self.mesh_loader)
}
pub fn into_render_configs(mut self)->Result<RenderConfigs,std::io::Error>{
let mut sorted_textures=vec![None;self.render_config_loader.texture_count as usize];
for (texture_path,render_config_id) in self.render_config_loader.texture_paths{
let render_config=self.render_config_loader.render_configs.get_mut(render_config_id.get() as usize).unwrap();
if let (Some(texture_path),Some(texture_id))=(texture_path,render_config.texture){
if let Ok(mut file)=std::fs::File::open(format!("textures/{}.dds",texture_path)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
sorted_textures[texture_id.get() as usize]=Some(Texture::ImageDDS(data));
}else{
//texture failed to load
render_config.texture=None;
}
}
}
Ok(RenderConfigs::new(
sorted_textures,
self.render_config_loader.render_configs,
))
}
}

View File

@ -1,39 +0,0 @@
use strafesnet_common::model::{TextureId,RenderConfigId,RenderConfig};
#[derive(Clone)]
pub enum Texture{
ImageDDS(Vec<u8>),
}
impl AsRef<[u8]> for Texture{
fn as_ref(&self)->&[u8]{
match self{
Texture::ImageDDS(data)=>data.as_ref(),
}
}
}
pub struct RenderConfigs{
textures:Vec<Option<Texture>>,
render_configs:Vec<RenderConfig>,
}
impl RenderConfigs{
pub(crate) const fn new(textures:Vec<Option<Texture>>,render_configs:Vec<RenderConfig>)->Self{
Self{
textures,
render_configs,
}
}
pub fn consume(self)->(
impl Iterator<Item=(TextureId,Texture)>,
impl Iterator<Item=(RenderConfigId,RenderConfig)>
){
(
self.textures.into_iter().enumerate().filter_map(|(texture_id,maybe_texture)|
maybe_texture.map(|texture|(TextureId::new(texture_id as u32),texture))
),
self.render_configs.into_iter().enumerate().map(|(render_id,render)|
(RenderConfigId::new(render_id as u32),render)
),
)
}
}
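//hedged usage sketch, not part of the original file: consume() hands back two
//id-tagged streams which a renderer can collect or upload independently
#[allow(dead_code)]
fn example_split(render_configs:RenderConfigs)->(Vec<(TextureId,Texture)>,Vec<(RenderConfigId,RenderConfig)>){
let (textures,configs)=render_configs.consume();
(textures.collect(),configs.collect())
}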

View File

@ -1,60 +0,0 @@
use strafesnet_common::model::MeshId;
//duplicate this code for now
#[derive(Clone)]
pub struct MdlData(Vec<u8>);
impl MdlData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct VtxData(Vec<u8>);
impl VtxData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct VvdData(Vec<u8>);
impl VvdData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct ModelData{
pub mdl:MdlData,
pub vtx:VtxData,
pub vvd:VvdData,
}
//mesh loading is more prone to failure
pub struct Meshes{
meshes:Vec<Option<ModelData>>,
}
impl Meshes{
pub(crate) const fn new(meshes:Vec<Option<ModelData>>)->Self{
Self{
meshes,
}
}
pub fn get_texture(&self,texture_id:MeshId)->Option<&ModelData>{
self.meshes.get(texture_id.get() as usize)?.as_ref()
}
pub fn into_iter(self)->impl Iterator<Item=(MeshId,ModelData)>{
self.meshes.into_iter().enumerate().filter_map(|(mesh_id,maybe_mesh)|
maybe_mesh.map(|mesh|(MeshId::new(mesh_id as u32),mesh))
)
}
}

View File

@ -1 +0,0 @@
/target

View File

@ -1,20 +0,0 @@
[package]
name = "fixed_wide"
version = "0.1.1"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Fixed point numbers with optional widening Mul operator."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
[features]
default=[]
deferred-division=["dep:ratio_ops"]
wide-mul=[]
zeroes=["dep:arrayvec"]
[dependencies]
bnum = "0.12.0"
arrayvec = { version = "0.7.6", optional = true }
paste = "1.0.15"
ratio_ops = { path = "../ratio_ops", registry = "strafesnet", optional = true }

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,830 +0,0 @@
use bnum::{BInt,cast::As};
#[derive(Clone,Copy,Debug,Default,Hash,PartialEq,PartialOrd,Ord)]
/// A fixed-point number for which multiply operations widen the bits in the output (when the wide-mul feature is enabled).
/// N is the number of u64s to use
/// F is the number of fractional bits (always N*32 lol)
pub struct Fixed<const N:usize,const F:usize>{
pub(crate)bits:BInt<{N}>,
}
impl<const N:usize,const F:usize> Fixed<N,F>{
pub const MAX:Self=Self::from_bits(BInt::<N>::MAX);
pub const MIN:Self=Self::from_bits(BInt::<N>::MIN);
pub const ZERO:Self=Self::from_bits(BInt::<N>::ZERO);
pub const EPSILON:Self=Self::from_bits(BInt::<N>::ONE);
pub const NEG_EPSILON:Self=Self::from_bits(BInt::<N>::NEG_ONE);
pub const ONE:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32));
pub const TWO:Self=Self::from_bits(BInt::<N>::TWO.shl(F as u32));
pub const HALF:Self=Self::from_bits(BInt::<N>::ONE.shl(F as u32-1));
pub const NEG_ONE:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32));
pub const NEG_TWO:Self=Self::from_bits(BInt::<N>::NEG_TWO.shl(F as u32));
pub const NEG_HALF:Self=Self::from_bits(BInt::<N>::NEG_ONE.shl(F as u32-1));
}
impl<const N:usize,const F:usize> Fixed<N,F>{
#[inline]
pub const fn from_bits(bits:BInt::<N>)->Self{
Self{
bits,
}
}
#[inline]
pub const fn to_bits(self)->BInt<N>{
self.bits
}
#[inline]
pub const fn raw_digit(value:i64)->Self{
let mut digits=[0u64;N];
digits[0]=value.abs() as u64;
//sign bit
digits[N-1]|=(value&i64::MIN) as u64;
Self::from_bits(BInt::from_bits(bnum::BUint::from_digits(digits)))
}
#[inline]
pub const fn is_zero(self)->bool{
self.bits.is_zero()
}
#[inline]
pub const fn is_negative(self)->bool{
self.bits.is_negative()
}
#[inline]
pub const fn is_positive(self)->bool{
self.bits.is_positive()
}
#[inline]
pub const fn abs(self)->Self{
Self::from_bits(self.bits.abs())
}
}
impl<const F:usize> Fixed<1,F>{
/// My old code called this function everywhere so let's provide it
#[inline]
pub const fn raw(value:i64)->Self{
Self::from_bits(BInt::from_bits(bnum::BUint::from_digit(value as u64)))
}
#[inline]
pub const fn to_raw(self)->i64{
let &[digit]=self.to_bits().to_bits().digits();
digit as i64
}
}
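//hedged sketch, not part of the original file: raw/to_raw round-trip the
//underlying bits, so 1<<F is exactly ONE for the single-word type
#[cfg(test)]
mod raw_sketch{
use super::*;
#[test]
fn raw_round_trip(){
assert_eq!(Fixed::<1,32>::raw(1i64<<32),Fixed::<1,32>::ONE);
assert_eq!(Fixed::<1,32>::ONE.to_raw(),1i64<<32);
//negative raw bits work too: -1 is the smallest negative step
assert_eq!(Fixed::<1,32>::raw(-1),Fixed::<1,32>::NEG_EPSILON);
}
}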
macro_rules! impl_from {
($($from:ty),*)=>{
$(
impl<const N:usize,const F:usize> From<$from> for Fixed<N,F>{
#[inline]
fn from(value:$from)->Self{
Self::from_bits(BInt::<{N}>::from(value)<<F as u32)
}
}
)*
};
}
impl_from!(
u8,u16,u32,u64,u128,usize,
i8,i16,i32,i64,i128,isize
);
impl<const N:usize,const F:usize,T> PartialEq<T> for Fixed<N,F>
where
T:Copy,
BInt::<N>:From<T>,
{
#[inline]
fn eq(&self,&other:&T)->bool{
self.bits.eq(&other.into())
}
}
impl<const N:usize,const F:usize> Eq for Fixed<N,F>{}
impl<const N:usize,const F:usize,T> PartialOrd<T> for Fixed<N,F>
where
T:Copy,
BInt::<N>:From<T>,
{
#[inline]
fn partial_cmp(&self,&other:&T)->Option<std::cmp::Ordering>{
self.bits.partial_cmp(&other.into())
}
}
impl<const N:usize,const F:usize> std::ops::Neg for Fixed<N,F>{
type Output=Self;
#[inline]
fn neg(self)->Self{
Self::from_bits(self.bits.neg())
}
}
impl<const N:usize,const F:usize> std::iter::Sum for Fixed<N,F>{
#[inline]
fn sum<I:Iterator<Item=Self>>(iter:I)->Self{
let mut sum=Self::ZERO;
for elem in iter{
sum+=elem;
}
sum
}
}
const fn signed_shift(lhs:u64,rhs:i32)->u64{
if rhs.is_negative(){
lhs>>-rhs
}else{
lhs<<rhs
}
}
macro_rules! impl_into_float {
( $output: ty, $unsigned:ty, $exponent_bits:expr, $mantissa_bits:expr ) => {
impl<const N:usize,const F:usize> Into<$output> for Fixed<N,F>{
#[inline]
fn into(self)->$output{
const DIGIT_SHIFT:u32=6;//Log2[64]
// SBBB BBBB
// 1001 1110 0000 0000
let sign=if self.bits.is_negative(){(1 as $unsigned)<<(<$unsigned>::BITS-1)}else{0};
let unsigned=self.bits.unsigned_abs();
let most_significant_bit=unsigned.bits();
let exp=if unsigned.is_zero(){
0
}else{
let msb=most_significant_bit as $unsigned;
let _127=((1 as $unsigned)<<($exponent_bits-1))-1;
let msb_offset=msb+_127-1-F as $unsigned;
msb_offset<<($mantissa_bits-1)
};
let digits=unsigned.digits();
let digit_index=most_significant_bit.saturating_sub(1)>>DIGIT_SHIFT;
let digit=digits[digit_index as usize];
//How many bits does the mantissa take from this digit
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let mut unmasked_mant=signed_shift(digit,rest_of_mantissa) as $unsigned;
if 0<rest_of_mantissa&&digit_index!=0{
//take the next digit down and shove some of its bits onto the bottom of the mantissa
let digit=digits[digit_index as usize-1];
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=$mantissa_bits as i32-(take_bits as i32);
let unmasked_mant2=signed_shift(digit,rest_of_mantissa) as $unsigned;
unmasked_mant|=unmasked_mant2;
}
let mant=unmasked_mant&((1 as $unsigned)<<($mantissa_bits-1))-1;
let bits=sign|exp|mant;
<$output>::from_bits(bits)
}
}
}
}
impl_into_float!(f32,u32,8,24);
impl_into_float!(f64,u64,11,53);
#[inline]
fn integer_decode_f32(f: f32) -> (u64, i16, bool) {
let bits: u32 = f.to_bits();
let sign: bool = bits & (1<<31) != 0;
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
#[inline]
fn integer_decode_f64(f: f64) -> (u64, i16, bool) {
let bits: u64 = f.to_bits();
let sign: bool = bits & (1u64<<63) != 0;
let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
let mantissa = if exponent == 0 {
(bits & 0xfffffffffffff) << 1
} else {
(bits & 0xfffffffffffff) | 0x10000000000000
};
// Exponent bias + mantissa shift
exponent -= 1023 + 52;
(mantissa, exponent, sign)
}
#[derive(Debug,Eq,PartialEq)]
pub enum FixedFromFloatError{
Nan,
Infinite,
Overflow,
Underflow,
}
impl FixedFromFloatError{
pub fn underflow_to_zero<const N:usize,const F:usize>(self)->Result<Fixed<N,F>,Self>{
match self{
FixedFromFloatError::Underflow=>Ok(Fixed::ZERO),
_=>Err(self),
}
}
}
macro_rules! impl_from_float {
( $decode:ident, $input: ty, $mantissa_bits:expr ) => {
impl<const N:usize,const F:usize> TryFrom<$input> for Fixed<N,F>{
type Error=FixedFromFloatError;
#[inline]
fn try_from(value:$input)->Result<Self,Self::Error>{
const DIGIT_SHIFT:u32=6;
match value.classify(){
std::num::FpCategory::Nan=>Err(FixedFromFloatError::Nan),
std::num::FpCategory::Infinite=>Err(FixedFromFloatError::Infinite),
std::num::FpCategory::Zero=>Ok(Self::ZERO),
std::num::FpCategory::Subnormal
|std::num::FpCategory::Normal
=>{
let (m,e,s)=$decode(value);
let mut digits=[0u64;N];
let most_significant_bit=e as i32+$mantissa_bits as i32+F as i32;
if most_significant_bit<0{
return Err(FixedFromFloatError::Underflow);
}
let digit_index=most_significant_bit>>DIGIT_SHIFT;
let digit=digits.get_mut(digit_index as usize).ok_or(FixedFromFloatError::Overflow)?;
let take_bits=most_significant_bit-(digit_index<<DIGIT_SHIFT);
let rest_of_mantissa=-($mantissa_bits as i32-(take_bits as i32));
*digit=signed_shift(m,rest_of_mantissa);
if rest_of_mantissa<0&&digit_index!=0{
//we don't care if some float bits are partially truncated
if let Some(digit)=digits.get_mut((digit_index-1) as usize){
let take_bits=most_significant_bit-((digit_index-1)<<DIGIT_SHIFT);
let rest_of_mantissa=-($mantissa_bits as i32-(take_bits as i32));
*digit=signed_shift(m,rest_of_mantissa);
}
}
let bits=BInt::from_bits(bnum::BUint::from_digits(digits));
Ok(if s{
Self::from_bits(bits.overflowing_neg().0)
}else{
Self::from_bits(bits)
})
},
}
}
}
}
}
impl_from_float!(integer_decode_f32,f32,24);
impl_from_float!(integer_decode_f64,f64,53);
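//hedged sketch, not part of the original file: exact binary fractions
//round-trip through f32, and values below the fractional resolution come
//back as Underflow (which underflow_to_zero can flush to zero)
#[cfg(test)]
mod float_conversion_sketch{
use super::*;
#[test]
fn float_round_trip(){
let x=Fixed::<2,64>::try_from(1.5f32).unwrap();
assert_eq!(x,Fixed::<2,64>::ONE+Fixed::<2,64>::HALF);
let f:f32=x.into();
assert_eq!(f,1.5);
}
#[test]
fn float_underflow(){
let tiny=Fixed::<1,32>::try_from(f32::MIN_POSITIVE);
assert_eq!(tiny,Err(FixedFromFloatError::Underflow));
assert_eq!(tiny.or_else(|e|e.underflow_to_zero()),Ok(Fixed::ZERO));
}
}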
impl<const N:usize,const F:usize> core::fmt::Display for Fixed<N,F>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
let float:f32=(*self).into();
core::write!(f,"{:.3}",float)
}
}
macro_rules! impl_additive_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> $struct<N,F>{
#[inline]
pub const fn $method(self, other: Self) -> Self {
Self::from_bits(self.bits.$method(other.bits))
}
}
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
self.$method(other)
}
}
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>,
{
type Output = $output;
#[inline]
fn $method(self, other: U) -> Self::Output {
Self::from_bits(self.bits.$method(BInt::<N>::from(other).shl(F as u32)))
}
}
};
}
macro_rules! impl_additive_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait for $struct<N,F>{
#[inline]
fn $method(&mut self, other: Self) {
self.bits.$method(other.bits);
}
}
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>,
{
#[inline]
fn $method(&mut self, other: U) {
self.bits.$method(BInt::<N>::from(other).shl(F as u32));
}
}
};
}
// Impl arithmetic operators
impl_additive_assign_operator!( Fixed, AddAssign, add_assign );
impl_additive_operator!( Fixed, Add, add, Self );
impl_additive_assign_operator!( Fixed, SubAssign, sub_assign );
impl_additive_operator!( Fixed, Sub, sub, Self );
impl_additive_assign_operator!( Fixed, RemAssign, rem_assign );
impl_additive_operator!( Fixed, Rem, rem, Self );
// Impl bitwise operators
impl_additive_assign_operator!( Fixed, BitAndAssign, bitand_assign );
impl_additive_operator!( Fixed, BitAnd, bitand, Self );
impl_additive_assign_operator!( Fixed, BitOrAssign, bitor_assign );
impl_additive_operator!( Fixed, BitOr, bitor, Self );
impl_additive_assign_operator!( Fixed, BitXorAssign, bitxor_assign );
impl_additive_operator!( Fixed, BitXor, bitxor, Self );
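//hedged sketch, not part of the original file: the integer right-hand-side
//impls above shift the integer up by F bits, so adding 1 adds one whole unit
#[cfg(test)]
mod additive_operator_sketch{
use super::*;
#[test]
fn add_fixed_and_integer(){
assert_eq!(Fixed::<1,32>::HALF+Fixed::<1,32>::HALF,Fixed::<1,32>::ONE);
assert_eq!(Fixed::<1,32>::ONE+1i64,Fixed::<1,32>::TWO);
}
}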
// non-wide operators. The result is the same width as the inputs.
// This macro is not used in the default configuration.
#[allow(unused_macros)]
macro_rules! impl_multiplicative_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
type Output = $output;
#[inline]
fn $method(self, other: Self) -> Self::Output {
paste::item!{
self.[<fixed_ $method>](other)
}
}
}
};
}
macro_rules! impl_multiplicative_assign_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $non_assign_method: ident ), $width:expr ) => {
impl<const F:usize> core::ops::$trait for $struct<$width,F>{
#[inline]
fn $method(&mut self, other: Self) {
paste::item!{
*self=self.[<fixed_ $non_assign_method>](other);
}
}
}
};
}
macro_rules! impl_multiply_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self, rhs: Self) -> Self {
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Self::from_bits(out.shr(F as u32).as_())
}else{
-Self::from_bits(out.shr(F as u32).as_())
}
}
}
}
#[cfg(not(feature="wide-mul"))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<i64> for Fixed<$width,{$width*32}>{
type Output=Self;
#[inline]
fn divide(self, other: i64)->Self::Output{
Self::from_bits(self.bits.div_euclid(BInt::from(other)))
}
}
}
}
macro_rules! impl_divide_operator_not_const_generic {
( ($struct: ident, $trait: ident, $method: ident, $output: ty ), $width:expr ) => {
impl<const F:usize> $struct<$width,F>{
paste::item!{
#[inline]
pub fn [<fixed_ $method>](self,other:Self)->Self{
//this only needs to be $width+F as u32/64+1 but MUH CONST GENERICS!!!!!
let lhs=self.bits.as_::<BInt::<{$width*2}>>().shl(F as u32);
let rhs=other.bits.as_::<BInt::<{$width*2}>>();
Self::from_bits(lhs.div_euclid(rhs).as_())
}
}
}
#[cfg(all(not(feature="wide-mul"),not(feature="deferred-division")))]
impl_multiplicative_operator_not_const_generic!(($struct, $trait, $method, $output ), $width);
#[cfg(all(not(feature="wide-mul"),feature="deferred-division"))]
impl<const F:usize> ratio_ops::ratio::Divide for $struct<$width,F>{
type Output = $output;
#[inline]
fn divide(self, other: Self) -> Self::Output {
paste::item!{
self.[<fixed_ $method>](other)
}
}
}
};
}
macro_rules! impl_multiplicative_operator {
( $struct: ident, $trait: ident, $method: ident, $inner_method: ident, $output: ty ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
{
type Output = $output;
#[inline]
fn $method(self,other:U)->Self::Output{
Self::from_bits(self.bits.$inner_method(BInt::<N>::from(other)))
}
}
};
}
macro_rules! impl_multiplicative_assign_operator {
( $struct: ident, $trait: ident, $method: ident, $not_assign_method: ident ) => {
impl<const N:usize,const F:usize,U> core::ops::$trait<U> for $struct<N,F>
where
BInt::<N>:From<U>+core::ops::$trait,
{
#[inline]
fn $method(&mut self,other:U){
self.bits=self.bits.$not_assign_method(BInt::<N>::from(other));
}
}
};
}
macro_rules! macro_repeated{
(
$macro:ident,
$any:tt,
$($repeated:tt),*
)=>{
$(
$macro!($any, $repeated);
)*
};
}
macro_rules! macro_16 {
( $macro: ident, $any:tt ) => {
macro_repeated!($macro,$any,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);
}
}
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, MulAssign, mul_assign, mul) );
macro_16!( impl_multiply_operator_not_const_generic, (Fixed, Mul, mul, Self) );
macro_16!( impl_multiplicative_assign_operator_not_const_generic, (Fixed, DivAssign, div_assign, div) );
macro_16!( impl_divide_operator_not_const_generic, (Fixed, Div, div, Self) );
impl_multiplicative_assign_operator!( Fixed, MulAssign, mul_assign, mul );
impl_multiplicative_operator!( Fixed, Mul, mul, mul, Self );
impl_multiplicative_assign_operator!( Fixed, DivAssign, div_assign, div_euclid );
impl_multiplicative_operator!( Fixed, Div, div, div_euclid, Self );
#[cfg(feature="deferred-division")]
impl<const LHS_N:usize,const LHS_F:usize,const RHS_N:usize,const RHS_F:usize> core::ops::Div<Fixed<RHS_N,RHS_F>> for Fixed<LHS_N,LHS_F>{
type Output=ratio_ops::ratio::Ratio<Fixed<LHS_N,LHS_F>,Fixed<RHS_N,RHS_F>>;
#[inline]
fn div(self, other: Fixed<RHS_N,RHS_F>)->Self::Output{
ratio_ops::ratio::Ratio::new(self,other)
}
}
#[cfg(feature="deferred-division")]
impl<const N:usize,const F:usize> ratio_ops::ratio::Parity for Fixed<N,F>{
fn parity(&self)->bool{
self.is_negative()
}
}
macro_rules! impl_shift_operator {
( $struct: ident, $trait: ident, $method: ident, $output: ty ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
type Output = $output;
#[inline]
fn $method(self, other: u32) -> Self::Output {
Self::from_bits(self.bits.$method(other))
}
}
};
}
macro_rules! impl_shift_assign_operator {
( $struct: ident, $trait: ident, $method: ident ) => {
impl<const N:usize,const F:usize> core::ops::$trait<u32> for $struct<N,F>{
#[inline]
fn $method(&mut self, other: u32) {
self.bits.$method(other);
}
}
};
}
impl_shift_assign_operator!( Fixed, ShlAssign, shl_assign );
impl_shift_operator!( Fixed, Shl, shl, Self );
impl_shift_assign_operator!( Fixed, ShrAssign, shr_assign );
impl_shift_operator!( Fixed, Shr, shr, Self );
// wide operators. The result width is the sum of the input widths, i.e. none of the multiplications can overflow or lose precision.
#[allow(unused_macros)]
macro_rules! impl_wide_operators{
($lhs:expr,$rhs:expr)=>{
impl core::ops::Mul<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn mul(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_mul_ $lhs _ $rhs>](other)
}
}
}
#[cfg(not(feature="deferred-division"))]
impl core::ops::Div<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn div(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
#[cfg(feature="deferred-division")]
impl ratio_ops::ratio::Divide<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Output=Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>;
#[inline]
fn divide(self, other: Fixed<$rhs,{$rhs*32}>)->Self::Output{
paste::item!{
self.[<wide_div_ $lhs _ $rhs>](other)
}
}
}
}
}
// WIDE MUL: multiply into a wider type
// let a = I32F32::ONE;
// let b:I64F64 = a.wide_mul(a);
macro_rules! impl_wide_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>();
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
Fixed::from_bits(lhs*rhs)
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $lhs _ $rhs>](self,rhs:Fixed<$rhs,{$rhs*32}>)->Fixed<{$lhs+$rhs},{($lhs+$rhs)*32}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$lhs+$rhs}>>().shl($rhs*64);
let rhs=rhs.bits.as_::<BInt<{$lhs+$rhs}>>();
Fixed::from_bits(lhs/rhs)
}
}
}
#[cfg(feature="wide-mul")]
impl_wide_operators!($lhs,$rhs);
};
}
macro_rules! impl_wide_same_size_not_const_generic{
(
(),
$width:expr
)=>{
impl Fixed<$width,{$width*32}>
{
paste::item!{
#[inline]
pub fn [<wide_mul_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
let (low,high)=self.bits.unsigned_abs().widening_mul(rhs.bits.unsigned_abs());
let out:BInt::<{$width*2}>=unsafe{core::mem::transmute([low,high])};
if self.is_negative()==rhs.is_negative(){
Fixed::from_bits(out)
}else{
// Normal neg is the cheapest negation operation
// And the inputs cannot reach the point where it matters
Fixed::from_bits(out.neg())
}
}
/// This operation cannot represent the fraction exactly,
/// but it shapes the output to have precision for the
/// largest and smallest possible fractions.
#[inline]
pub fn [<wide_div_ $width _ $width>](self,rhs:Fixed<$width,{$width*32}>)->Fixed<{$width*2},{$width*2*32}>{
// (lhs/2^LHS_FRAC)/(rhs/2^RHS_FRAC)
let lhs=self.bits.as_::<BInt<{$width*2}>>().shl($width*64);
let rhs=rhs.bits.as_::<BInt<{$width*2}>>();
Fixed::from_bits(lhs/rhs)
}
}
}
#[cfg(feature="wide-mul")]
impl_wide_operators!($width,$width);
};
}
//const generics sidestepped wahoo
macro_repeated!(
impl_wide_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),
(1,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),
(1,3),(2,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),
(1,4),(2,4),(3,4), (5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),
(1,5),(2,5),(3,5),(4,5), (6,5),(7,5),(8,5),(9,5),(10,5),(11,5),
(1,6),(2,6),(3,6),(4,6),(5,6), (7,6),(8,6),(9,6),(10,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7), (8,7),(9,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8), (9,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),
(1,11),(2,11),(3,11),(4,11),(5,11),
(1,12),(2,12),(3,12),(4,12),
(1,13),(2,13),(3,13),
(1,14),(2,14),
(1,15)
);
macro_repeated!(
impl_wide_same_size_not_const_generic,(),
1,2,3,4,5,6,7,8
);
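//hedged sketch, not part of the original file: calling the generated
//wide_mul_1_1/wide_div_1_1 methods directly works regardless of the
//wide-mul and deferred-division features; the result is twice as wide
#[cfg(test)]
mod wide_operator_sketch{
use super::*;
#[test]
fn wide_mul_and_div(){
let a=Fixed::<1,32>::from(3);
let b=Fixed::<1,32>::TWO;
let product:Fixed<2,64>=a.wide_mul_1_1(b);
assert_eq!(product,Fixed::<2,64>::from(6));
let quotient:Fixed<2,64>=a.wide_div_1_1(b);
assert_eq!(quotient,Fixed::<2,64>::ONE+Fixed::<2,64>::HALF);
}
}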
pub trait Fix<Out>{
fn fix(self)->Out;
}
macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
self
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
macro_repeated!(
impl_fix_rhs_lt_lhs_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13),
(15,14),(16,14),
(16,15)
);
macro_repeated!(
impl_fix_lhs_lt_rhs_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
);
macro_repeated!(
impl_fix_lhs_eq_rhs_not_const_generic,(),
(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
);
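// A short sketch of the fix_N resizing methods and the Fix trait generated
// above, assuming the fixed_wide::types aliases; narrowing shifts away the
// extra fractional bits and assumes the integer part still fits:
fn fix_usage_sketch(){
    use fixed_wide::types::{I32F32,I256F256};
    use fixed_wide::fixed::Fix;
    // widen 32.32 -> 256.256
    let wide:I256F256=I32F32::ONE.fix_8();
    assert_eq!(wide,I256F256::ONE);
    // narrow 256.256 -> 32.32, by name or through the Fix trait
    assert_eq!(wide.fix_1(),I32F32::ONE);
    let narrow:I32F32=wide.fix();
    assert_eq!(narrow,I32F32::ONE);
}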
macro_rules! impl_not_const_generic{
($n:expr,$_2n:expr)=>{
impl Fixed<$n,{$n*32}>{
paste::item!{
#[inline]
pub fn sqrt_unchecked(self)->Self{
//1<<max_shift must be the minimum power of two which when squared is greater than self
//calculating max_shift:
//1. count "used" bits to the left of the decimal, not including the sign bit (so -1)
//2. divide by 2 via >>1 (sqrt-ish)
//3. add on fractional offset
//Voila
let used_bits=self.bits.bits() as i32-1-($n*32) as i32;
let max_shift=((used_bits>>1)+($n*32) as i32) as u32;
let mut result=Self::ZERO;
//resize self to match the wide mul output
let wide_self=self.[<fix_ $_2n>]();
//descend down the bits and check if flipping each bit would push the square over the input value
for shift in (0..=max_shift).rev(){
let new_result={
let mut bits=result.to_bits().to_bits();
bits.set_bit(shift,true);
Self::from_bits(BInt::from_bits(bits))
};
if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
result=new_result;
}
}
result
}
}
#[inline]
pub fn sqrt(self)->Self{
if self<Self::ZERO{
panic!("Square root less than zero")
}else{
self.sqrt_unchecked()
}
}
#[inline]
pub fn sqrt_checked(self)->Option<Self>{
if self<Self::ZERO{
None
}else{
Some(self.sqrt_unchecked())
}
}
}
}
}
impl_not_const_generic!(1,2);
impl_not_const_generic!(2,4);
impl_not_const_generic!(3,6);
impl_not_const_generic!(4,8);
impl_not_const_generic!(5,10);
impl_not_const_generic!(6,12);
impl_not_const_generic!(7,14);
impl_not_const_generic!(8,16);
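// A small sketch of the square-root entry points defined above, again assuming
// the fixed_wide::types aliases; sqrt panics on negative input, sqrt_checked
// returns None instead, and sqrt_unchecked skips the sign check entirely:
fn sqrt_usage_sketch(){
    use fixed_wide::types::I32F32;
    let x=I32F32::from(9);
    assert_eq!(x.sqrt(),I32F32::from(3));
    assert_eq!(x.sqrt_checked(),Some(I32F32::from(3)));
    assert_eq!((-x).sqrt_checked(),None);
    assert_eq!(x.sqrt_unchecked(),I32F32::from(3));
}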

View File

@ -1,8 +0,0 @@
pub mod fixed;
pub mod types;
#[cfg(feature="zeroes")]
pub mod zeroes;
#[cfg(test)]
mod tests;

View File

@ -1,218 +0,0 @@
use crate::types::I32F32;
use crate::types::I256F256;
#[test]
fn you_can_add_numbers(){
let a=I256F256::from((3i128*2).pow(4));
assert_eq!(a+a,I256F256::from((3i128*2).pow(4)*2));
}
#[test]
fn to_f32(){
let a=I256F256::from(1)>>2;
let f:f32=a.into();
assert_eq!(f,0.25f32);
let f:f32=(-a).into();
assert_eq!(f,-0.25f32);
let a=I256F256::from(0);
let f:f32=(-a).into();
assert_eq!(f,0f32);
let a=I256F256::from(237946589723468975i64)<<16;
let f:f32=a.into();
assert_eq!(f,237946589723468975f32*2.0f32.powi(16));
}
#[test]
fn to_f64(){
let a=I256F256::from(1)>>2;
let f:f64=a.into();
assert_eq!(f,0.25f64);
let f:f64=(-a).into();
assert_eq!(f,-0.25f64);
let a=I256F256::from(0);
let f:f64=(-a).into();
assert_eq!(f,0f64);
let a=I256F256::from(237946589723468975i64)<<16;
let f:f64=a.into();
assert_eq!(f,237946589723468975f64*2.0f64.powi(16));
}
#[test]
fn from_f32(){
let a=I256F256::from(1)>>2;
let b:Result<I256F256,_>=0.25f32.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(-1)>>2;
let b:Result<I256F256,_>=(-0.25f32).try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0);
let b:Result<I256F256,_>=0.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f32*2.0f32.powi(16)).try_into();
assert_eq!(b,Ok(a));
//I32F32::MAX into f32 is truncated into this value
let a=I32F32::raw(0b111111111111111111111111000000000000000000000000000000000000000i64);
let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MAX).try_into();
assert_eq!(b,Ok(a));
//I32F32::MIN hits a special case since it's not representable as a positive signed integer
//TODO: don't return an overflow because this is technically possible
let a=I32F32::MIN;
let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
//16 is within the 24 bits of float precision
let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.fix_2()).try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow));
//test many cases
for i in 0..64{
let a=crate::fixed::Fixed::<2,64>::raw_digit(0b111111111111111111111111000000000000000000000000000000000000000i64)<<i;
let f:f32=a.into();
let b:Result<crate::fixed::Fixed<2,64>,_>=f.try_into();
assert_eq!(b,Ok(a));
}
}
#[test]
fn from_f64(){
let a=I256F256::from(1)>>2;
let b:Result<I256F256,_>=0.25f64.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(-1)>>2;
let b:Result<I256F256,_>=(-0.25f64).try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0);
let b:Result<I256F256,_>=0.try_into();
assert_eq!(b,Ok(a));
let a=I256F256::from(0b101011110101001010101010000000000000000000000000000i64)<<16;
let b:Result<I256F256,_>=(0b101011110101001010101010000000000000000000000000000u64 as f64*2.0f64.powi(16)).try_into();
assert_eq!(b,Ok(a));
}
#[test]
fn you_can_shr_numbers(){
let a=I32F32::from(4);
assert_eq!(a>>1,I32F32::from(2));
}
#[test]
fn test_wide_mul(){
let a=I32F32::ONE;
let aa=a.wide_mul_1_1(a);
assert_eq!(aa,crate::types::I64F64::ONE);
}
#[test]
fn test_wide_div(){
let a=I32F32::ONE*4;
let b=I32F32::ONE*2;
let wide_a=a.wide_mul_1_1(I32F32::ONE);
let wide_b=b.wide_mul_1_1(I32F32::ONE);
let ab=a.wide_div_1_1(b);
assert_eq!(ab,crate::types::I64F64::ONE*2);
let wab=wide_a.wide_div_2_1(b);
assert_eq!(wab,crate::fixed::Fixed::<3,96>::ONE*2);
let awb=a.wide_div_1_2(wide_b);
assert_eq!(awb,crate::fixed::Fixed::<3,96>::ONE*2);
}
#[test]
fn test_wide_mul_repeated() {
let a=I32F32::from(2);
let b=I32F32::from(3);
let w1=a.wide_mul_1_1(b);
let w2=w1.wide_mul_2_2(w1);
let w3=w2.wide_mul_4_4(w2);
assert_eq!(w3,I256F256::from((3i128*2).pow(4)));
}
#[test]
fn test_bint(){
let a=I32F32::ONE;
assert_eq!(a*2,I32F32::from(2));
}
#[test]
fn test_fix(){
assert_eq!(I32F32::ONE.fix_8(),I256F256::ONE);
assert_eq!(I32F32::ONE,I256F256::ONE.fix_1());
assert_eq!(I32F32::NEG_ONE.fix_8(),I256F256::NEG_ONE);
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.fix_1());
}
#[test]
fn test_sqrt(){
let a=I32F32::ONE*4;
assert_eq!(a.sqrt(),I32F32::from(2));
}
#[test]
fn test_sqrt_zero(){
let a=I32F32::ZERO;
assert_eq!(a.sqrt(),I32F32::ZERO);
}
#[test]
fn test_sqrt_low(){
let a=I32F32::HALF;
let b=a.fixed_mul(a);
assert_eq!(b.sqrt(),a);
}
fn find_equiv_sqrt_via_f64(n:I32F32)->I32F32{
//GIMME THEM BITS BOY
let &[bits]=n.to_bits().to_bits().digits();
let ibits=bits as i64;
let f=(ibits as f64)/((1u64<<32) as f64);
let f_ans=f.sqrt();
let i=(f_ans*((1u64<<32) as f64)) as i64;
let r=I32F32::from_bits(bnum::BInt::<1>::from(i));
//mimic the behaviour of the algorithm,
//return the result if it truncates to the exact answer
if (r+I32F32::EPSILON).wide_mul_1_1(r+I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
return r+I32F32::EPSILON;
}
if (r-I32F32::EPSILON).wide_mul_1_1(r-I32F32::EPSILON)==n.wide_mul_1_1(I32F32::ONE){
return r-I32F32::EPSILON;
}
return r;
}
fn test_exact(n:I32F32){
assert_eq!(n.sqrt(),find_equiv_sqrt_via_f64(n));
}
#[test]
fn test_sqrt_exact(){
//43
for i in 0..((i64::MAX as f32).ln() as u32){
let n=I32F32::from_bits(bnum::BInt::<1>::from((i as f32).exp() as i64));
test_exact(n);
}
}
#[test]
fn test_sqrt_max(){
let a=I32F32::MAX;
test_exact(a);
}
#[test]
#[cfg(all(feature="zeroes",not(feature="deferred-division")))]
fn test_zeroes_normal(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE,I32F32::ONE]));
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE*3,I32F32::ONE*2,I32F32::ONE);
assert_eq!(zeroes,arrayvec::ArrayVec::from_iter([I32F32::NEG_ONE*3,I32F32::ONE]));
}
#[test]
#[cfg(all(feature="zeroes",feature="deferred-division"))]
fn test_zeroes_deferred_division(){
// (x-1)*(x+1)
// x^2-1
let zeroes=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
assert_eq!(
zeroes,
arrayvec::ArrayVec::from_iter([
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::NEG_ONE*2),
ratio_ops::ratio::Ratio::new(I32F32::ONE*2,I32F32::ONE*2),
])
);
}

View File

@ -1,4 +0,0 @@
pub type I32F32=crate::fixed::Fixed<1,32>;
pub type I64F64=crate::fixed::Fixed<2,64>;
pub type I128F128=crate::fixed::Fixed<4,128>;
pub type I256F256=crate::fixed::Fixed<8,256>;

View File

@ -1,53 +0,0 @@
use crate::fixed::Fixed;
use arrayvec::ArrayVec;
use std::cmp::Ordering;
macro_rules! impl_zeroes{
($n:expr)=>{
impl Fixed<$n,{$n*32}>{
#[inline]
pub fn zeroes2(a0:Self,a1:Self,a2:Self)->ArrayVec<<Self as core::ops::Div>::Output,2>{
let a2pos=match a2.cmp(&Self::ZERO){
Ordering::Greater=>true,
Ordering::Equal=>return ArrayVec::from_iter(Self::zeroes1(a0,a1).into_iter()),
Ordering::Less=>false,
};
let radicand=a1*a1-a2*a0*4;
match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
Ordering::Greater=>{
paste::item!{
let planar_radicand=radicand.sqrt().[<fix_ $n>]();
}
//sort roots ascending and avoid taking the difference of large numbers
let zeroes=match (a2pos,Self::ZERO<a1){
(true, true )=>[(-a1-planar_radicand)/(a2*2),(a0*2)/(-a1-planar_radicand)],
(true, false)=>[(a0*2)/(-a1+planar_radicand),(-a1+planar_radicand)/(a2*2)],
(false,true )=>[(a0*2)/(-a1-planar_radicand),(-a1-planar_radicand)/(a2*2)],
(false,false)=>[(-a1+planar_radicand)/(a2*2),(a0*2)/(-a1+planar_radicand)],
};
ArrayVec::from_iter(zeroes)
},
Ordering::Equal=>ArrayVec::from_iter([(a1)/(a2*-2)]),
Ordering::Less=>ArrayVec::new_const(),
}
}
#[inline]
pub fn zeroes1(a0:Self,a1:Self)->ArrayVec<<Self as core::ops::Div>::Output,1>{
if a1==Self::ZERO{
ArrayVec::new_const()
}else{
ArrayVec::from_iter([(-a0)/(a1)])
}
}
}
};
}
impl_zeroes!(1);
impl_zeroes!(2);
impl_zeroes!(3);
impl_zeroes!(4);
//sqrt doubles twice!
//impl_zeroes!(5);
//impl_zeroes!(6);
//impl_zeroes!(7);
//impl_zeroes!(8);
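// A brief sketch of the quadratic solver above for a0 + a1*x + a2*x^2 == 0,
// assuming the zeroes feature is enabled without deferred-division, so that
// division yields plain Fixed values as in test_zeroes_normal:
fn zeroes_usage_sketch(){
    use fixed_wide::types::I32F32;
    // x^2 - 1 == 0  =>  (a0,a1,a2) == (-1,0,1), roots returned in ascending order
    let roots=I32F32::zeroes2(I32F32::NEG_ONE,I32F32::ZERO,I32F32::ONE);
    assert_eq!(roots.as_slice(),&[I32F32::NEG_ONE,I32F32::ONE]);
    // the linear solver handles a2 == 0 (zeroes2 falls through to it)
    let root=I32F32::zeroes1(I32F32::NEG_ONE,I32F32::ONE);
    assert_eq!(root.as_slice(),&[I32F32::ONE]);
}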

View File

@ -1 +0,0 @@
/target

View File

@ -1,22 +0,0 @@
[package]
name = "linear_ops"
version = "0.1.0"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Vector/Matrix operations using trait bounds."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
[features]
default=["named-fields","fixed-wide"]
named-fields=[]
fixed-wide=["dep:fixed_wide","dep:paste"]
deferred-division=["dep:ratio_ops"]
[dependencies]
ratio_ops = { path = "../ratio_ops", registry = "strafesnet", optional = true }
fixed_wide = { path = "../fixed_wide", registry = "strafesnet", optional = true }
paste = { version = "1.0.15", optional = true }
[dev-dependencies]
fixed_wide = { path = "../fixed_wide", registry = "strafesnet", features = ["wide-mul"] }

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,10 +0,0 @@
mod macros;
pub mod types;
pub mod vector;
pub mod matrix;
#[cfg(feature="named-fields")]
mod named;
#[cfg(test)]
mod tests;

View File

@ -1 +0,0 @@

View File

@ -1,79 +0,0 @@
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fixed_wide_vector_not_const_generic {
(
(),
$n:expr
) => {
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$n,{$n*32}>>{
#[inline]
pub fn length(self)-><fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output{
self.length_squared().sqrt_unchecked()
}
#[inline]
pub fn with_length<U,V>(self,length:U)-><Vector<N,V> as core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>>::Output
where
fixed_wide::fixed::Fixed<$n,{$n*32}>:core::ops::Mul<U,Output=V>,
U:Copy,
V:core::ops::Div<<fixed_wide::fixed::Fixed::<$n,{$n*32}> as core::ops::Mul>::Output>,
{
self*length/self.length()
}
}
};
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! macro_4 {
( $macro: ident, $any:tt ) => {
$crate::macro_repeated!($macro,$any,1,2,3,4);
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fixed_wide_vector {
() => {
$crate::macro_4!(impl_fixed_wide_vector_not_const_generic,());
// I LOVE NOT BEING ABLE TO USE CONST GENERICS
$crate::macro_repeated!(
impl_fix_not_const_generic,(),
(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
(1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),(9,9),(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),(10,10),(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),(11,11),(12,11),(13,11),(14,11),(15,11),(16,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),(12,12),(13,12),(14,12),(15,12),(16,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),(13,13),(14,13),(15,13),(16,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),(14,14),(15,14),(16,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),(15,15),(16,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),(16,16)
);
};
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_fix_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>
{
paste::item!{
#[inline]
pub fn [<fix_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
self.map(|t|t.[<fix_ $rhs>]())
}
}
}
}
}
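// A minimal sketch of the generated length helper, assuming linear_ops and
// fixed_wide are dependencies and fixed_wide has its wide-mul feature enabled
// (as in the dev-dependencies above), so that length() widens to I64F64:
fn length_usage_sketch(){
    use linear_ops::types::Vector3;
    use fixed_wide::types::{I32F32,I64F64};
    let v=Vector3::new([I32F32::from(3),I32F32::from(4),I32F32::ZERO]);
    // length_squared sums the widened 64.64 products; length is its square root
    let len:I64F64=v.length();
    assert_eq!(len,I64F64::from(5));
}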

View File

@ -1,272 +0,0 @@
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix {
() => {
impl<const X:usize,const Y:usize,T> Matrix<X,Y,T>{
#[inline(always)]
pub const fn new(array:[[T;Y];X])->Self{
Self{array}
}
#[inline(always)]
pub fn to_array(self)->[[T;Y];X]{
self.array
}
#[inline]
pub fn from_cols(cols:[Vector<Y,T>;X])->Self
{
Matrix::new(
cols.map(|col|col.array),
)
}
#[inline]
pub fn map<F,U>(self,f:F)->Matrix<X,Y,U>
where
F:Fn(T)->U
{
Matrix::new(
self.array.map(|inner|inner.map(&f)),
)
}
#[inline]
pub fn transpose(self)->Matrix<Y,X,T>{
//how did I think of this
let mut array_of_iterators=self.array.map(|axis|axis.into_iter());
Matrix::new(
core::array::from_fn(|_|
array_of_iterators.each_mut().map(|iter|
iter.next().unwrap()
)
)
)
}
#[inline]
// old (list of rows) MatY<VecX>.MatX<VecZ> = MatY<VecZ>
// new (list of columns) MatX<VecY>.MatZ<VecX> = MatZ<VecY>
pub fn dot<const Z:usize,U,V>(self,rhs:Matrix<Z,X,U>)->Matrix<Z,Y,V>
where
T:core::ops::Mul<U,Output=V>+Copy,
V:core::iter::Sum,
U:Copy,
{
let mut array_of_iterators=self.array.map(|axis|axis.into_iter().cycle());
Matrix{
array:rhs.array.map(|rhs_axis|
core::array::from_fn(|_|
array_of_iterators
.iter_mut()
.zip(rhs_axis.iter())
.map(|(lhs_iter,&rhs_value)|
lhs_iter.next().unwrap()*rhs_value
).sum()
)
)
}
}
#[inline]
// MatX<VecY>.VecX = VecY
pub fn transform_vector<U,V>(self,rhs:Vector<X,U>)->Vector<Y,V>
where
T:core::ops::Mul<U,Output=V>,
V:core::iter::Sum,
U:Copy,
{
let mut array_of_iterators=self.array.map(|axis|axis.into_iter());
Vector::new(
core::array::from_fn(|_|
array_of_iterators
.iter_mut()
.zip(rhs.array.iter())
.map(|(lhs_iter,&rhs_value)|
lhs_iter.next().unwrap()*rhs_value
).sum()
)
)
}
}
impl<const X:usize,const Y:usize,T> Matrix<X,Y,T>
where
T:Copy
{
#[inline(always)]
pub const fn from_value(value:T)->Self{
Self::new([[value;Y];X])
}
}
impl<const X:usize,const Y:usize,T:Default> Default for Matrix<X,Y,T>{
#[inline]
fn default()->Self{
Self::new(
core::array::from_fn(|_|core::array::from_fn(|_|Default::default()))
)
}
}
impl<const X:usize,const Y:usize,T:core::fmt::Display> core::fmt::Display for Matrix<X,Y,T>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
for col in &self.array[0..X]{
core::write!(f,"\n")?;
for elem in &col[0..Y-1]{
core::write!(f,"{}, ",elem)?;
}
// assume we will be using matrices of size 1x1 or greater
core::write!(f,"{}",col.last().unwrap())?;
}
Ok(())
}
}
impl<const X:usize,const Y:usize,const Z:usize,T,U,V> core::ops::Mul<Matrix<Z,X,U>> for Matrix<X,Y,T>
where
T:core::ops::Mul<U,Output=V>+Copy,
V:core::iter::Sum,
U:Copy,
{
type Output=Matrix<Z,Y,V>;
#[inline]
fn mul(self,rhs:Matrix<Z,X,U>)->Self::Output{
self.dot(rhs)
}
}
impl<const X:usize,const Y:usize,T,U,V> core::ops::Mul<Vector<X,U>> for Matrix<X,Y,T>
where
T:core::ops::Mul<U,Output=V>,
V:core::iter::Sum,
U:Copy,
{
type Output=Vector<Y,V>;
#[inline]
fn mul(self,rhs:Vector<X,U>)->Self::Output{
self.transform_vector(rhs)
}
}
#[cfg(feature="deferred-division")]
$crate::impl_matrix_deferred_division!();
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_deferred_division {
() => {
impl<const X:usize,const Y:usize,T:ratio_ops::ratio::Divide<U,Output=V>,U:Copy,V> ratio_ops::ratio::Divide<U> for Matrix<X,Y,T>{
type Output=Matrix<X,Y,V>;
#[inline]
fn divide(self,rhs:U)->Self::Output{
self.map(|t|t.divide(rhs))
}
}
impl<const X:usize,const Y:usize,T,U> core::ops::Div<U> for Matrix<X,Y,T>{
type Output=ratio_ops::ratio::Ratio<Matrix<X,Y,T>,U>;
#[inline]
fn div(self,rhs:U)->Self::Output{
ratio_ops::ratio::Ratio::new(self,rhs)
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_extend {
( $x: expr, $y: expr ) => {
impl<T> Matrix<$x,$y,T>{
#[inline]
pub fn extend_column(self,value:Vector<$y,T>)->Matrix<{$x+1},$y,T>{
let mut iter=self.array.into_iter().chain(core::iter::once(value.array));
Matrix::new(
core::array::from_fn(|_|iter.next().unwrap()),
)
}
#[inline]
pub fn extend_row(self,value:Vector<$x,T>)->Matrix<$x,{$y+1},T>{
let mut iter_rows=value.array.into_iter();
Matrix::new(
self.array.map(|axis|{
let mut elements_iter=axis.into_iter().chain(core::iter::once(iter_rows.next().unwrap()));
core::array::from_fn(|_|elements_iter.next().unwrap())
})
)
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_named_fields_shape {
(
($struct_outer:ident, $size_outer: expr),
($size_inner: expr)
) => {
impl<T> core::ops::Deref for Matrix<$size_outer,$size_inner,T>{
type Target=$struct_outer<Vector<$size_inner,T>>;
#[inline]
fn deref(&self)->&Self::Target{
unsafe{core::mem::transmute(&self.array)}
}
}
impl<T> core::ops::DerefMut for Matrix<$size_outer,$size_inner,T>{
#[inline]
fn deref_mut(&mut self)->&mut Self::Target{
unsafe{core::mem::transmute(&mut self.array)}
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_named_fields_shape_shim {
(
($($vector_info:tt),+),
$matrix_info:tt
) => {
$crate::macro_repeated!(impl_matrix_named_fields_shape,$matrix_info,$($vector_info),+);
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_named_fields {
(
($($matrix_info:tt),+),
$vector_infos:tt
) => {
$crate::macro_repeated!(impl_matrix_named_fields_shape_shim,$vector_infos,$($matrix_info),+);
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_matrix_3x3 {
()=>{
impl<T,T2,T3> Matrix<3,3,T>
where
//cross
T:core::ops::Mul<T,Output=T2>+Copy,
T2:core::ops::Sub,
//dot
T:core::ops::Mul<<T2 as core::ops::Sub>::Output,Output=T3>,
T3:core::iter::Sum,
{
pub fn det(self)->T3{
self.x_axis.dot(self.y_axis.cross(self.z_axis))
}
}
impl<T,T2> Matrix<3,3,T>
where
T:core::ops::Mul<T,Output=T2>+Copy,
T2:core::ops::Sub,
{
pub fn adjugate(self)->Matrix<3,3,<T2 as core::ops::Sub>::Output>{
Matrix::new([
[self.y_axis.y*self.z_axis.z-self.y_axis.z*self.z_axis.y,self.x_axis.z*self.z_axis.y-self.x_axis.y*self.z_axis.z,self.x_axis.y*self.y_axis.z-self.x_axis.z*self.y_axis.y],
[self.y_axis.z*self.z_axis.x-self.y_axis.x*self.z_axis.z,self.x_axis.x*self.z_axis.z-self.x_axis.z*self.z_axis.x,self.x_axis.z*self.y_axis.x-self.x_axis.x*self.y_axis.z],
[self.y_axis.x*self.z_axis.y-self.y_axis.y*self.z_axis.x,self.x_axis.y*self.z_axis.x-self.x_axis.x*self.z_axis.y,self.x_axis.x*self.y_axis.y-self.x_axis.y*self.y_axis.x],
])
}
}
}
}
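// A small sketch of the column-major convention noted above, assuming the
// named-fields feature (a default) so the x_axis/y_axis accessors exist:
fn column_major_usage_sketch(){
    use linear_ops::types::{Matrix2,Vector2};
    // from_cols takes a list of columns, so x_axis is the first column
    let m=Matrix2::from_cols([Vector2::new([1,2]),Vector2::new([3,4])]);
    assert_eq!(m.x_axis.y,2);
    // MatX<VecY> * VecX -> VecY: the columns are weighted by the vector's entries
    let v=m*Vector2::new([1,0]);
    assert_eq!(v.to_array(),[1,2]);
}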

View File

@ -1,20 +0,0 @@
pub mod common;
pub mod vector;
pub mod matrix;
#[cfg(feature="fixed-wide")]
pub mod fixed_wide;
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! macro_repeated{
(
$macro:ident,
$any:tt,
$($repeated:tt),*
)=>{
$(
$crate::$macro!($any, $repeated);
)*
};
}

View File

@ -1,357 +0,0 @@
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector {
() => {
impl<const N:usize,T> Vector<N,T>{
#[inline(always)]
pub const fn new(array:[T;N])->Self{
Self{array}
}
#[inline(always)]
pub fn to_array(self)->[T;N]{
self.array
}
#[inline]
pub fn map<F,U>(self,f:F)->Vector<N,U>
where
F:Fn(T)->U
{
Vector::new(
self.array.map(f)
)
}
#[inline]
pub fn map_zip<F,U,V>(self,other:Vector<N,U>,f:F)->Vector<N,V>
where
F:Fn((T,U))->V,
{
let mut iter=self.array.into_iter().zip(other.array);
Vector::new(
core::array::from_fn(|_|f(iter.next().unwrap())),
)
}
}
impl<const N:usize,T:Copy> Vector<N,T>{
#[inline(always)]
pub const fn from_value(value:T)->Self{
Self::new([value;N])
}
}
impl<const N:usize,T:Default> Default for Vector<N,T>{
#[inline]
fn default()->Self{
Self::new(
core::array::from_fn(|_|Default::default())
)
}
}
impl<const N:usize,T:core::fmt::Display> core::fmt::Display for Vector<N,T>{
#[inline]
fn fmt(&self,f:&mut core::fmt::Formatter)->Result<(),core::fmt::Error>{
for elem in &self.array[0..N-1]{
core::write!(f,"{}, ",elem)?;
}
// assume we will be using vectors of length 1 or greater
core::write!(f,"{}",self.array.last().unwrap())
}
}
impl<const N:usize,T:Ord> Vector<N,T>{
#[inline]
pub fn min(self,rhs:Self)->Self{
self.map_zip(rhs,|(a,b)|a.min(b))
}
#[inline]
pub fn max(self,rhs:Self)->Self{
self.map_zip(rhs,|(a,b)|a.max(b))
}
#[inline]
pub fn cmp(self,rhs:Self)->Vector<N,core::cmp::Ordering>{
self.map_zip(rhs,|(a,b)|a.cmp(&b))
}
#[inline]
pub fn lt(self,rhs:Self)->Vector<N,bool>{
self.map_zip(rhs,|(a,b)|a.lt(&b))
}
#[inline]
pub fn gt(self,rhs:Self)->Vector<N,bool>{
self.map_zip(rhs,|(a,b)|a.gt(&b))
}
#[inline]
pub fn ge(self,rhs:Self)->Vector<N,bool>{
self.map_zip(rhs,|(a,b)|a.ge(&b))
}
#[inline]
pub fn le(self,rhs:Self)->Vector<N,bool>{
self.map_zip(rhs,|(a,b)|a.le(&b))
}
}
impl<const N:usize> Vector<N,bool>{
#[inline]
pub fn all(&self)->bool{
self.array==[true;N]
}
#[inline]
pub fn any(&self)->bool{
self.array!=[false;N]
}
}
impl<const N:usize,T:core::ops::Neg<Output=V>,V> core::ops::Neg for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn neg(self)->Self::Output{
Vector::new(
self.array.map(|t|-t)
)
}
}
impl<const N:usize,T> Vector<N,T>
{
#[inline]
pub fn dot<U,V>(self,rhs:Vector<N,U>)->V
where
T:core::ops::Mul<U,Output=V>,
V:core::iter::Sum,
{
self.array.into_iter().zip(rhs.array).map(|(a,b)|a*b).sum()
}
}
impl<const N:usize,T,V> Vector<N,T>
where
T:core::ops::Mul<Output=V>+Copy,
V:core::iter::Sum,
{
#[inline]
pub fn length_squared(self)->V{
self.array.into_iter().map(|t|t*t).sum()
}
}
// Impl arithmetic operators
$crate::impl_vector_assign_operator!(AddAssign, add_assign );
$crate::impl_vector_operator!(Add, add );
$crate::impl_vector_assign_operator!(SubAssign, sub_assign );
$crate::impl_vector_operator!(Sub, sub );
$crate::impl_vector_assign_operator!(RemAssign, rem_assign );
$crate::impl_vector_operator!(Rem, rem );
// mul and div are special: usually you multiply by a scalar,
// and implementing both vec*vec and vec*scalar gives conflicting implementations Q_Q
$crate::impl_vector_assign_operator_scalar!(MulAssign, mul_assign );
$crate::impl_vector_operator_scalar!(Mul, mul );
$crate::impl_vector_assign_operator_scalar!(DivAssign, div_assign );
#[cfg(not(feature="deferred-division"))]
$crate::impl_vector_operator_scalar!(Div, div );
#[cfg(feature="deferred-division")]
$crate::impl_vector_deferred_division!();
// Impl bitwise operators
$crate::impl_vector_assign_operator!(BitAndAssign, bitand_assign );
$crate::impl_vector_operator!(BitAnd, bitand );
$crate::impl_vector_assign_operator!(BitOrAssign, bitor_assign );
$crate::impl_vector_operator!(BitOr, bitor );
$crate::impl_vector_assign_operator!(BitXorAssign, bitxor_assign );
$crate::impl_vector_operator!(BitXor, bitxor );
// Impl shift operators
$crate::impl_vector_shift_assign_operator!(ShlAssign, shl_assign);
$crate::impl_vector_shift_operator!(Shl, shl);
$crate::impl_vector_shift_assign_operator!(ShrAssign, shr_assign);
$crate::impl_vector_shift_operator!(Shr, shr);
// dedicated methods for this type
#[cfg(feature="fixed-wide")]
$crate::impl_fixed_wide_vector!();
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_deferred_division {
() => {
impl<const N:usize,T:ratio_ops::ratio::Divide<U,Output=V>,U:Copy,V> ratio_ops::ratio::Divide<U> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn divide(self,rhs:U)->Self::Output{
self.map(|t|t.divide(rhs))
}
}
impl<const N:usize,T,U> core::ops::Div<U> for Vector<N,T>{
type Output=ratio_ops::ratio::Ratio<Vector<N,T>,U>;
#[inline]
fn div(self,rhs:U)->Self::Output{
ratio_ops::ratio::Ratio::new(self,rhs)
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_operator_scalar {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U,Output=V>,U:Copy,V> core::ops::$trait<U> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn $method(self,rhs:U)->Self::Output{
self.map(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_operator {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U,Output=V>,U,V> core::ops::$trait<Vector<N,U>> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn $method(self,rhs:Vector<N,U>)->Self::Output{
self.map_zip(rhs,|(a,b)|a.$method(b))
}
}
impl<const N:usize,T:core::ops::$trait<i64,Output=T>> core::ops::$trait<i64> for Vector<N,T>{
type Output=Self;
#[inline]
fn $method(self,rhs:i64)->Self::Output{
self.map(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_assign_operator_scalar {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U>,U:Copy> core::ops::$trait<U> for Vector<N,T>{
#[inline]
fn $method(&mut self,rhs:U){
self.array.iter_mut()
.for_each(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_assign_operator {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U>,U> core::ops::$trait<Vector<N,U>> for Vector<N,T>{
#[inline]
fn $method(&mut self,rhs:Vector<N,U>){
self.array.iter_mut().zip(rhs.array)
.for_each(|(a,b)|a.$method(b))
}
}
impl<const N:usize,T:core::ops::$trait<i64>> core::ops::$trait<i64> for Vector<N,T>{
#[inline]
fn $method(&mut self,rhs:i64){
self.array.iter_mut()
.for_each(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_shift_operator {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U,Output=V>,U,V> core::ops::$trait<Vector<N,U>> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn $method(self,rhs:Vector<N,U>)->Self::Output{
self.map_zip(rhs,|(a,b)|a.$method(b))
}
}
impl<const N:usize,T:core::ops::$trait<u32,Output=V>,V> core::ops::$trait<u32> for Vector<N,T>{
type Output=Vector<N,V>;
#[inline]
fn $method(self,rhs:u32)->Self::Output{
self.map(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_shift_assign_operator {
($trait: ident, $method: ident ) => {
impl<const N:usize,T:core::ops::$trait<U>,U> core::ops::$trait<Vector<N,U>> for Vector<N,T>{
#[inline]
fn $method(&mut self,rhs:Vector<N,U>){
self.array.iter_mut().zip(rhs.array)
.for_each(|(a,b)|a.$method(b))
}
}
impl<const N:usize,T:core::ops::$trait<u32>> core::ops::$trait<u32> for Vector<N,T>{
#[inline]
fn $method(&mut self,rhs:u32){
self.array.iter_mut()
.for_each(|t|t.$method(rhs))
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_extend {
( $size: expr ) => {
impl<T> Vector<$size,T>{
#[inline]
pub fn extend(self,value:T)->Vector<{$size+1},T>{
let mut iter=self.array.into_iter().chain(core::iter::once(value));
Vector::new(
core::array::from_fn(|_|iter.next().unwrap()),
)
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_named_fields {
( $struct:ident, $size: expr ) => {
impl<T> core::ops::Deref for Vector<$size,T>{
type Target=$struct<T>;
#[inline]
fn deref(&self)->&Self::Target{
unsafe{core::mem::transmute(&self.array)}
}
}
impl<T> core::ops::DerefMut for Vector<$size,T>{
#[inline]
fn deref_mut(&mut self)->&mut Self::Target{
unsafe{core::mem::transmute(&mut self.array)}
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_vector_3 {
()=>{
impl<T> Vector<3,T>
{
#[inline]
pub fn cross<U,V>(self,rhs:Vector<3,U>)->Vector<3,<V as core::ops::Sub>::Output>
where
T:core::ops::Mul<U,Output=V>+Copy,
U:Copy,
V:core::ops::Sub,
{
Vector::new([
self.y*rhs.z-self.z*rhs.y,
self.z*rhs.x-self.x*rhs.z,
self.x*rhs.y-self.y*rhs.x,
])
}
}
}
}
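// A short sketch of the scalar division behaviour described above: with the
// default features Div maps over the elements, while the deferred-division
// feature would instead return a ratio_ops Ratio to be resolved later:
fn scalar_div_usage_sketch(){
    use linear_ops::types::Vector3;
    let v=Vector3::new([2,4,6]);
    assert_eq!((v/2).to_array(),[1,2,3]);
}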

View File

@ -1,17 +0,0 @@
use crate::vector::Vector;
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct Matrix<const X:usize,const Y:usize,T>{
pub(crate) array:[[T;Y];X],
}
crate::impl_matrix!();
crate::impl_matrix_extend!(2,2);
crate::impl_matrix_extend!(2,3);
crate::impl_matrix_extend!(3,2);
crate::impl_matrix_extend!(3,3);
//Special case 3x3 matrix operations because I cba to write macros for the arbitrary cases
#[cfg(feature="named-fields")]
crate::impl_matrix_3x3!();

View File

@ -1,59 +0,0 @@
use crate::vector::Vector;
use crate::matrix::Matrix;
#[repr(C)]
pub struct Vector2<T> {
pub x: T,
pub y: T,
}
#[repr(C)]
pub struct Vector3<T> {
pub x: T,
pub y: T,
pub z: T,
}
#[repr(C)]
pub struct Vector4<T> {
pub x: T,
pub y: T,
pub z: T,
pub w: T,
}
crate::impl_vector_named_fields!(Vector2, 2);
crate::impl_vector_named_fields!(Vector3, 3);
crate::impl_vector_named_fields!(Vector4, 4);
#[repr(C)]
pub struct Matrix2<T> {
pub x_axis: T,
pub y_axis: T,
}
#[repr(C)]
pub struct Matrix3<T> {
pub x_axis: T,
pub y_axis: T,
pub z_axis: T,
}
#[repr(C)]
pub struct Matrix4<T> {
pub x_axis: T,
pub y_axis: T,
pub z_axis: T,
pub w_axis: T,
}
crate::impl_matrix_named_fields!(
//outer struct
(
(Matrix2, 2),
(Matrix3, 3),
(Matrix4, 4)
),
//inner struct
(
(2),
(3),
(4)
)
);

View File

@ -1,96 +0,0 @@
use crate::types::{Matrix3,Matrix3x2,Matrix3x4,Matrix4x2,Vector3};
type Planar64=fixed_wide::types::I32F32;
type Planar64Wide1=fixed_wide::types::I64F64;
//type Planar64Wide2=fixed_wide::types::I128F128;
type Planar64Wide3=fixed_wide::types::I256F256;
#[test]
fn wide_vec3(){
let v=Vector3::from_value(Planar64::from(3));
let v1=v*v.x;
let v2=v1*v1.y;
let v3=v2*v2.z;
assert_eq!(v3.array,Vector3::from_value(Planar64Wide3::from(3i128.pow(8))).array);
}
#[test]
fn wide_vec3_dot(){
let v=Vector3::from_value(Planar64::from(3));
let v1=v*v.x;
let v2=v1*v1.y;
let v3=v2.dot(v2);
assert_eq!(v3,Planar64Wide3::from(3i128.pow(8)*3));
}
#[test]
fn wide_vec3_length_squared(){
let v=Vector3::from_value(Planar64::from(3));
let v1=v*v.x;
let v2=v1*v1.y;
let v3=v2.length_squared();
assert_eq!(v3,Planar64Wide3::from(3i128.pow(8)*3));
}
#[test]
fn wide_matrix_dot(){
let lhs=Matrix3x4::new([
[Planar64::from(1),Planar64::from(2),Planar64::from(3),Planar64::from(4)],
[Planar64::from(5),Planar64::from(6),Planar64::from(7),Planar64::from(8)],
[Planar64::from(9),Planar64::from(10),Planar64::from(11),Planar64::from(12)],
]).transpose();
let rhs=Matrix4x2::new([
[Planar64::from(1),Planar64::from(2)],
[Planar64::from(3),Planar64::from(4)],
[Planar64::from(5),Planar64::from(6)],
[Planar64::from(7),Planar64::from(8)],
]).transpose();
// Mat3<Vec4>.dot(Mat4<Vec2>) -> Mat3<Vec2>
let m_dot=lhs*rhs;
//In[1]:= {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}} . {{1, 2}, {3, 4}, {5, 6}, {7, 8}}
//Out[1]= {{50, 60}, {114, 140}, {178, 220}}
assert_eq!(
m_dot.array,
Matrix3x2::new([
[Planar64Wide1::from(50),Planar64Wide1::from(60)],
[Planar64Wide1::from(114),Planar64Wide1::from(140)],
[Planar64Wide1::from(178),Planar64Wide1::from(220)],
]).transpose().array
);
}
#[test]
#[cfg(feature="named-fields")]
fn wide_matrix_det(){
let m=Matrix3::new([
[Planar64::from(1),Planar64::from(2),Planar64::from(3)],
[Planar64::from(4),Planar64::from(5),Planar64::from(7)],
[Planar64::from(6),Planar64::from(8),Planar64::from(9)],
]);
// In[2]:= Det[{{1, 2, 3}, {4, 5, 7}, {6, 8, 9}}]
// Out[2]= 7
assert_eq!(m.det(),fixed_wide::fixed::Fixed::<3,96>::from(7));
}
#[test]
#[cfg(feature="named-fields")]
fn wide_matrix_adjugate(){
let m=Matrix3::new([
[Planar64::from(1),Planar64::from(2),Planar64::from(3)],
[Planar64::from(4),Planar64::from(5),Planar64::from(7)],
[Planar64::from(6),Planar64::from(8),Planar64::from(9)],
]);
// In[6]:= Adjugate[{{1, 2, 3}, {4, 5, 7}, {6, 8, 9}}]
// Out[6]= {{-11, 6, -1}, {6, -9, 5}, {2, 4, -3}}
assert_eq!(
m.adjugate().array,
Matrix3::new([
[Planar64Wide1::from(-11),Planar64Wide1::from(6),Planar64Wide1::from(-1)],
[Planar64Wide1::from(6),Planar64Wide1::from(-9),Planar64Wide1::from(5)],
[Planar64Wide1::from(2),Planar64Wide1::from(4),Planar64Wide1::from(-3)],
]).array
);
}

View File

@ -1,6 +0,0 @@
mod tests;
#[cfg(feature="named-fields")]
mod named;
mod fixed_wide;

View File

@ -1,30 +0,0 @@
use crate::types::{Vector3,Matrix3};
#[test]
fn test_vector(){
let mut v=Vector3::new([1,2,3]);
assert_eq!(v.x,1);
assert_eq!(v.y,2);
assert_eq!(v.z,3);
v.x=5;
assert_eq!(v.x,5);
v.y*=v.x;
assert_eq!(v.y,10);
}
#[test]
fn test_matrix(){
let mut v=Matrix3::from_value(2);
assert_eq!(v.x_axis.x,2);
assert_eq!(v.y_axis.y,2);
assert_eq!(v.z_axis.z,2);
v.x_axis.x=5;
assert_eq!(v.x_axis.x,5);
v.y_axis.z*=v.x_axis.x;
assert_eq!(v.y_axis.z,10);
}

View File

@ -1,59 +0,0 @@
use crate::types::{Vector2,Vector3,Matrix3x4,Matrix4x2,Matrix3x2,Matrix2x3};
#[test]
fn test_bool(){
assert_eq!(Vector3::new([false,false,false]).any(),false);
assert_eq!(Vector3::new([false,false,true]).any(),true);
assert_eq!(Vector3::new([false,false,true]).all(),false);
assert_eq!(Vector3::new([true,true,true]).all(),true);
}
#[test]
fn test_length_squared(){
assert_eq!(Vector3::new([1,2,3]).length_squared(),14);
}
#[test]
fn test_arithmetic(){
let a=Vector3::new([1,2,3]);
assert_eq!((a+a*2).array,Vector3::new([1*3,2*3,3*3]).array);
}
#[test]
fn matrix_transform_vector(){
let m=Matrix2x3::new([
[1,2,3],
[4,5,6],
]).transpose();
let v=Vector3::new([1,2,3]);
let transformed=m*v;
assert_eq!(transformed.array,Vector2::new([14,32]).array);
}
#[test]
fn matrix_dot(){
// All this code was written row major and I converted the lib to column major
let rhs=Matrix4x2::new([
[ 1.0, 2.0],
[ 3.0, 4.0],
[ 5.0, 6.0],
[ 7.0, 8.0],
]).transpose(); // | | |
let lhs=Matrix3x4::new([ // | | |
[1.0, 2.0, 3.0, 4.0],// [ 50.0, 60.0],
[5.0, 6.0, 7.0, 8.0],// [114.0,140.0],
[9.0,10.0,11.0,12.0],// [178.0,220.0],
]).transpose();
// Mat3<Vec4>.dot(Mat4<Vec2>) -> Mat3<Vec2>
let m_dot=lhs*rhs;
//In[1]:= {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}} . {{1, 2}, {3, 4}, {5, 6}, {7, 8}}
//Out[1]= {{50, 60}, {114, 140}, {178, 220}}
assert_eq!(
m_dot.array,
Matrix3x2::new([
[50.0,60.0],
[114.0,140.0],
[178.0,220.0],
]).transpose().array
);
}

View File

@ -1,18 +0,0 @@
use crate::vector::Vector;
use crate::matrix::Matrix;
pub type Vector2<T>=Vector<2,T>;
pub type Vector3<T>=Vector<3,T>;
pub type Vector4<T>=Vector<4,T>;
pub type Matrix2<T>=Matrix<2,2,T>;
pub type Matrix2x3<T>=Matrix<2,3,T>;
pub type Matrix2x4<T>=Matrix<2,4,T>;
pub type Matrix3x2<T>=Matrix<3,2,T>;
pub type Matrix3<T>=Matrix<3,3,T>;
pub type Matrix3x4<T>=Matrix<3,4,T>;
pub type Matrix4x2<T>=Matrix<4,2,T>;
pub type Matrix4x3<T>=Matrix<4,3,T>;
pub type Matrix4<T>=Matrix<4,4,T>;

View File

@ -1,19 +0,0 @@
/// An array-backed vector type. Named fields are made accessible via the Deref/DerefMut traits, which are implemented for 2-4 dimensions.
/// let mut v = Vector::new([1.0,2.0,3.0]);
/// v.x += v.z;
/// println!("v.x={}",v.x);
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct Vector<const N:usize,T>{
pub(crate) array:[T;N],
}
crate::impl_vector!();
// Needs const generics for generic case
crate::impl_vector_extend!(2);
crate::impl_vector_extend!(3);
//cross product
#[cfg(feature="named-fields")]
crate::impl_vector_3!();

View File

@ -1 +0,0 @@
/target

View File

@ -1,10 +0,0 @@
[package]
name = "ratio_ops"
version = "0.1.0"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Ratio operations using trait bounds for avoiding division like the plague."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
[dependencies]

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,4 +0,0 @@
pub mod ratio;
#[cfg(test)]
mod tests;

View File

@ -1,297 +0,0 @@
#[derive(Clone,Copy,Debug,Hash)]
pub struct Ratio<Num,Den>{
pub num:Num,
pub den:Den,
}
impl<Num,Den> Ratio<Num,Den>{
#[inline(always)]
pub const fn new(num:Num,den:Den)->Self{
Self{num,den}
}
}
/// The actual divide implementation; the Div operator is replaced with a Ratio constructor
pub trait Divide<Rhs=Self>{
type Output;
fn divide(self,rhs:Rhs)->Self::Output;
}
impl<Num,Den> Ratio<Num,Den>
where
Num:Divide<Den>,
{
#[inline]
pub fn divide(self)-><Num as Divide<Den>>::Output{
self.num.divide(self.den)
}
}
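As a quick orientation, here is a minimal usage sketch (not part of the crate): it adds a toy Divide impl for i64 purely for illustration, standing in for whatever fixed-point types the crate actually targets.
// Sketch only: a toy Divide impl so the deferred division can be demonstrated.
impl Divide for i64{
	type Output=i64;
	fn divide(self,rhs:i64)->i64{
		self/rhs
	}
}
fn divide_example(){
	// constructing the Ratio is exact; the actual division only happens in divide()
	let ratio=Ratio::new(10_i64,4_i64);
	assert_eq!(ratio.divide(),2);
}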
//take care to use the ratio methods to avoid nested ratios
impl<LhsNum,LhsDen> Ratio<LhsNum,LhsDen>{
#[inline]
pub fn mul_ratio<RhsNum,RhsDen>(self,rhs:Ratio<RhsNum,RhsDen>)->Ratio<<LhsNum as core::ops::Mul<RhsNum>>::Output,<LhsDen as core::ops::Mul<RhsDen>>::Output>
where
LhsNum:core::ops::Mul<RhsNum>,
LhsDen:core::ops::Mul<RhsDen>,
{
Ratio::new(self.num*rhs.num,self.den*rhs.den)
}
#[inline]
pub fn div_ratio<RhsNum,RhsDen>(self,rhs:Ratio<RhsNum,RhsDen>)->Ratio<<LhsNum as core::ops::Mul<RhsDen>>::Output,<LhsDen as core::ops::Mul<RhsNum>>::Output>
where
LhsNum:core::ops::Mul<RhsDen>,
LhsDen:core::ops::Mul<RhsNum>,
{
Ratio::new(self.num*rhs.den,self.den*rhs.num)
}
}
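A small illustration of the point above (sketch, not from the source): mul_ratio and div_ratio cross-combine the numerators and denominators directly, so the result is a flat Ratio rather than a Ratio nested inside another Ratio.
fn ratio_methods_example(){
	// (1/2)*(3/4) == 3/8: numerators and denominators multiply pairwise
	let a=Ratio::new(1,2).mul_ratio(Ratio::new(3,4));
	assert_eq!((a.num,a.den),(3,8));
	// (1/2)/(3/4) == 4/6: division cross-multiplies instead of nesting
	let b=Ratio::new(1,2).div_ratio(Ratio::new(3,4));
	assert_eq!((b.num,b.den),(4,6));
}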
macro_rules! impl_ratio_method {
($trait:ident, $method:ident, $ratio_method:ident) => {
impl<LhsNum,LhsDen> Ratio<LhsNum,LhsDen>{
#[inline]
pub fn $ratio_method<RhsNum,RhsDen,LhsCrossMul,RhsCrossMul>(self,rhs:Ratio<RhsNum,RhsDen>)->Ratio<<LhsCrossMul as core::ops::$trait<RhsCrossMul>>::Output,<LhsDen as core::ops::Mul<RhsDen>>::Output>
where
LhsNum:core::ops::Mul<RhsDen,Output=LhsCrossMul>,
LhsDen:core::ops::Mul<RhsNum,Output=RhsCrossMul>,
LhsDen:core::ops::Mul<RhsDen>,
LhsDen:Copy,
RhsDen:Copy,
LhsCrossMul:core::ops::$trait<RhsCrossMul>,
{
Ratio::new((self.num*rhs.den).$method(self.den*rhs.num),self.den*rhs.den)
}
}
};
}
impl_ratio_method!(Add,add,add_ratio);
impl_ratio_method!(Sub,sub,sub_ratio);
impl_ratio_method!(Rem,rem,rem_ratio);
/// Comparing two ratios needs to know the parity of the denominators
/// For signed integers this can be implemented with is_negative()
pub trait Parity{
fn parity(&self)->bool;
}
macro_rules! impl_parity_unsigned{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
false
}
}
)*
};
}
macro_rules! impl_parity_signed{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
self.is_negative()
}
}
)*
};
}
macro_rules! impl_parity_float{
($($type:ty),*)=>{
$(
impl Parity for $type{
fn parity(&self)->bool{
self.is_sign_negative()
}
}
)*
};
}
impl_parity_unsigned!(u8,u16,u32,u64,u128,usize);
impl_parity_signed!(i8,i16,i32,i64,i128,isize);
impl_parity_float!(f32,f64);
macro_rules! impl_ratio_ord_method{
($method:ident, $ratio_method:ident, $output:ty)=>{
impl<LhsNum,LhsDen:Parity> Ratio<LhsNum,LhsDen>{
#[inline]
pub fn $ratio_method<RhsNum,RhsDen:Parity,T>(self,rhs:Ratio<RhsNum,RhsDen>)->$output
where
LhsNum:core::ops::Mul<RhsDen,Output=T>,
LhsDen:core::ops::Mul<RhsNum,Output=T>,
T:Ord,
{
match self.den.parity()^rhs.den.parity(){
true=>(self.den*rhs.num).$method(&(self.num*rhs.den)),
false=>(self.num*rhs.den).$method(&(self.den*rhs.num)),
}
}
}
}
}
//PartialEq
impl_ratio_ord_method!(eq,eq_ratio,bool);
//PartialOrd
impl_ratio_ord_method!(lt,lt_ratio,bool);
impl_ratio_ord_method!(gt,gt_ratio,bool);
impl_ratio_ord_method!(le,le_ratio,bool);
impl_ratio_ord_method!(ge,ge_ratio,bool);
impl_ratio_ord_method!(partial_cmp,partial_cmp_ratio,Option<core::cmp::Ordering>);
//Ord
impl_ratio_ord_method!(cmp,cmp_ratio,core::cmp::Ordering);
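A brief worked example of why parity matters (sketch, not from the source): when exactly one denominator is negative, the cross-multiplied comparison has to be flipped, which is what the parity()^parity() branch above does.
fn parity_example(){
	// 1/-2 is -0.5 and 1/3 is roughly 0.333, so lt must hold;
	// naive cross multiplication (1*3 < (-2)*1, i.e. 3 < -2) would get this backwards
	assert!(Ratio::new(1,-2).lt_ratio(Ratio::new(1,3)));
	// both denominators positive: no flip is needed
	assert!(Ratio::new(1,2).gt_ratio(Ratio::new(-1,3)));
}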
/* generic rhs mul is not possible!
impl<Lhs,RhsNum,RhsDen> core::ops::Mul<Ratio<RhsNum,RhsDen>> for Lhs
where
Lhs:core::ops::Mul<RhsNum>,
{
type Output=Ratio<<Lhs as core::ops::Mul<RhsNum>>::Output,RhsDen>;
#[inline]
fn mul(self,rhs:Ratio<RhsNum,RhsDen>)->Self::Output{
Ratio::new(self*rhs.num,rhs.den)
}
}
*/
//operators
impl<LhsNum,LhsDen> core::ops::Neg for Ratio<LhsNum,LhsDen>
where
LhsNum:core::ops::Neg,
{
type Output=Ratio<<LhsNum as core::ops::Neg>::Output,LhsDen>;
#[inline]
fn neg(self)->Self::Output{
Ratio::new(-self.num,self.den)
}
}
impl<LhsNum,LhsDen,Rhs> core::ops::Mul<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsNum:core::ops::Mul<Rhs>,
{
type Output=Ratio<<LhsNum as core::ops::Mul<Rhs>>::Output,LhsDen>;
#[inline]
fn mul(self,rhs:Rhs)->Self::Output{
Ratio::new(self.num*rhs,self.den)
}
}
impl<LhsNum,LhsDen,Rhs> core::ops::Div<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsDen:core::ops::Mul<Rhs>,
{
type Output=Ratio<LhsNum,<LhsDen as core::ops::Mul<Rhs>>::Output>;
#[inline]
fn div(self,rhs:Rhs)->Self::Output{
Ratio::new(self.num,self.den*rhs)
}
}
macro_rules! impl_ratio_operator {
($trait:ident, $method:ident) => {
impl<LhsNum,LhsDen,Rhs,Intermediate> core::ops::$trait<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsNum:core::ops::$trait<Intermediate>,
LhsDen:Copy,
Rhs:core::ops::Mul<LhsDen,Output=Intermediate>,
{
type Output=Ratio<<LhsNum as core::ops::$trait<Intermediate>>::Output,LhsDen>;
#[inline]
fn $method(self,rhs:Rhs)->Self::Output{
Ratio::new(self.num.$method(rhs*self.den),self.den)
}
}
};
}
impl_ratio_operator!(Add,add);
impl_ratio_operator!(Sub,sub);
impl_ratio_operator!(Rem,rem);
//assign operators
impl<LhsNum,LhsDen,Rhs> core::ops::MulAssign<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsNum:core::ops::MulAssign<Rhs>,
{
#[inline]
fn mul_assign(&mut self,rhs:Rhs){
self.num*=rhs;
}
}
impl<LhsNum,LhsDen,Rhs> core::ops::DivAssign<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsDen:core::ops::MulAssign<Rhs>,
{
#[inline]
fn div_assign(&mut self,rhs:Rhs){
self.den*=rhs;
}
}
macro_rules! impl_ratio_assign_operator {
($trait:ident, $method:ident) => {
impl<LhsNum,LhsDen,Rhs> core::ops::$trait<Rhs> for Ratio<LhsNum,LhsDen>
where
LhsNum:core::ops::$trait,
LhsDen:Copy,
Rhs:core::ops::Mul<LhsDen,Output=LhsNum>,
{
#[inline]
fn $method(&mut self,rhs:Rhs){
self.num.$method(rhs*self.den)
}
}
};
}
impl_ratio_assign_operator!(AddAssign,add_assign);
impl_ratio_assign_operator!(SubAssign,sub_assign);
impl_ratio_assign_operator!(RemAssign,rem_assign);
// Only implement PartialEq<Self>
// Rust's operators aren't actually that good
impl<LhsNum,LhsDen,RhsNum,RhsDen,T,U> PartialEq<Ratio<RhsNum,RhsDen>> for Ratio<LhsNum,LhsDen>
where
LhsNum:Copy,
LhsDen:Copy,
RhsNum:Copy,
RhsDen:Copy,
LhsNum:core::ops::Mul<RhsDen,Output=T>,
RhsNum:core::ops::Mul<LhsDen,Output=U>,
T:PartialEq<U>,
{
#[inline]
fn eq(&self,other:&Ratio<RhsNum,RhsDen>)->bool{
(self.num*other.den).eq(&(other.num*self.den))
}
}
impl<Num,Den> Eq for Ratio<Num,Den> where Self:PartialEq{}
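For reference, a tiny sketch (not from the source) of what this cross-multiplied equality means in practice: ratios compare equal whenever their cross products match, with no reduction to lowest terms and no division.
fn eq_example(){
	// 2/4 equals 1/2 because the cross products agree: 2*2 == 1*4
	assert!(Ratio::new(2,4)==Ratio::new(1,2));
	assert!(Ratio::new(1,3)!=Ratio::new(1,2));
}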
impl<LhsNum,LhsDen,RhsNum,RhsDen,T,U> PartialOrd<Ratio<RhsNum,RhsDen>> for Ratio<LhsNum,LhsDen>
where
LhsNum:Copy,
LhsDen:Copy,
RhsNum:Copy,
RhsDen:Copy,
LhsNum:core::ops::Mul<RhsDen,Output=T>,
RhsNum:core::ops::Mul<LhsDen,Output=U>,
T:PartialOrd<U>,
{
#[inline]
fn partial_cmp(&self,other:&Ratio<RhsNum,RhsDen>)->Option<core::cmp::Ordering>{
(self.num*other.den).partial_cmp(&(other.num*self.den))
}
}
impl<Num,Den,T> Ord for Ratio<Num,Den>
where
Num:Copy,
Den:Copy,
Num:core::ops::Mul<Den,Output=T>,
T:Ord,
{
#[inline]
fn cmp(&self,other:&Self)->std::cmp::Ordering{
(self.num*other.den).cmp(&(other.num*self.den))
}
}

View File

@ -1,58 +0,0 @@
use crate::ratio::Ratio;
macro_rules! test_op{
($ratio_op:ident,$op:ident,$a:expr,$b:expr,$c:expr,$d:expr)=>{
assert_eq!(
Ratio::new($a,$b).$ratio_op(Ratio::new($c,$d)),
(($a as f32)/($b as f32)).$op(&(($c as f32)/($d as f32)))
);
};
}
macro_rules! test_many_ops{
($ratio_op:ident,$op:ident)=>{
test_op!($ratio_op,$op,1,2,3,4);
test_op!($ratio_op,$op,1,2,-3,4);
test_op!($ratio_op,$op,-1,2,-3,4);
test_op!($ratio_op,$op,-1,-2,-3,4);
test_op!($ratio_op,$op,2,1,6,3);
test_op!($ratio_op,$op,-2,1,6,3);
test_op!($ratio_op,$op,2,-1,-6,3);
test_op!($ratio_op,$op,2,1,6,-3);
};
}
#[test]
fn test_lt(){
test_many_ops!(lt_ratio,lt);
}
#[test]
fn test_gt(){
test_many_ops!(gt_ratio,gt);
}
#[test]
fn test_le(){
test_many_ops!(le_ratio,le);
}
#[test]
fn test_ge(){
test_many_ops!(ge_ratio,ge);
}
#[test]
fn test_eq(){
test_many_ops!(eq_ratio,eq);
}
#[test]
fn test_partial_cmp(){
test_many_ops!(partial_cmp_ratio,partial_cmp);
}
// #[test]
// fn test_cmp(){
// test_many_ops!(cmp_ratio,cmp);
// }

View File

@ -1 +0,0 @@
/target

View File

@ -1,22 +0,0 @@
[package]
name = "strafesnet_rbx_loader"
version = "0.5.2"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Convert Roblox place and model files to StrafesNET data structures."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bytemuck = "1.14.3"
glam = "0.29.0"
lazy-regex = "3.1.0"
rbx_binary = { version = "0.7.4", registry = "strafesnet" }
rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" }
rbx_mesh = "0.1.2"
rbx_reflection_database = { version = "0.2.10", registry = "strafesnet" }
rbx_xml = { version = "0.13.3", registry = "strafesnet" }
roblox_emulator = { path = "../roblox_emulator", registry = "strafesnet" }
strafesnet_common = { path = "../common", registry = "strafesnet" }

View File

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,19 +0,0 @@
StrafesNET Roblox Loader
========================
## Convert Roblox files into StrafesNET data structures
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>

View File

@ -1,107 +0,0 @@
use std::io::Read;
use rbx_dom_weak::WeakDom;
mod rbx;
mod mesh;
mod primitives;
pub mod data{
pub struct RobloxMeshBytes(Vec<u8>);
impl RobloxMeshBytes{
pub fn new(bytes:Vec<u8>)->Self{
Self(bytes)
}
pub(crate) fn cursor(self)->std::io::Cursor<Vec<u8>>{
std::io::Cursor::new(self.0)
}
}
}
pub struct Model{
dom:WeakDom,
}
impl Model{
fn new(dom:WeakDom)->Self{
Self{dom}
}
pub fn into_place(self)->Place{
let Self{mut dom}=self;
let context=roblox_emulator::context::Context::from_mut(&mut dom);
let services=context.convert_into_place();
Place{dom,services}
}
}
impl AsRef<WeakDom> for Model{
fn as_ref(&self)->&WeakDom{
&self.dom
}
}
pub struct Place{
dom:WeakDom,
services:roblox_emulator::context::Services,
}
impl Place{
fn new(dom:WeakDom)->Option<Self>{
let context=roblox_emulator::context::Context::from_ref(&dom);
Some(Self{
services:context.find_services()?,
dom,
})
}
pub fn run_scripts(&mut self){
let Place{dom,services}=self;
let runner=roblox_emulator::runner::Runner::new().unwrap();
let context=roblox_emulator::context::Context::from_mut(dom);
let scripts=context.scripts();
let runnable=runner.runnable_context_with_services(context,services).unwrap();
for script in scripts{
if let Err(e)=runnable.run_script(script){
println!("runner error: {e}");
}
}
}
}
impl AsRef<WeakDom> for Place{
fn as_ref(&self)->&WeakDom{
&self.dom
}
}
#[derive(Debug)]
pub enum ReadError{
RbxBinary(rbx_binary::DecodeError),
RbxXml(rbx_xml::DecodeError),
Io(std::io::Error),
UnknownFileFormat,
}
impl std::fmt::Display for ReadError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for ReadError{}
pub fn read<R:Read>(input:R)->Result<Model,ReadError>{
let mut buf=std::io::BufReader::new(input);
let peek=std::io::BufRead::fill_buf(&mut buf).map_err(ReadError::Io)?;
match &peek[0..8]{
b"<roblox!"=>rbx_binary::from_reader(buf).map(Model::new).map_err(ReadError::RbxBinary),
b"<roblox "=>rbx_xml::from_reader_default(buf).map(Model::new).map_err(ReadError::RbxXml),
_=>Err(ReadError::UnknownFileFormat),
}
}
//ConvertError
pub fn convert<AcquireRenderConfigId,AcquireMeshId>(
dom:impl AsRef<WeakDom>,
acquire_render_config_id:AcquireRenderConfigId,
acquire_mesh_id:AcquireMeshId
)->rbx::PartialMap1
where
AcquireRenderConfigId:FnMut(Option<&str>)->strafesnet_common::model::RenderConfigId,
AcquireMeshId:FnMut(&str)->strafesnet_common::model::MeshId,
{
rbx::convert(&dom.as_ref(),acquire_render_config_id,acquire_mesh_id)
}
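To tie the API together, a hedged usage sketch (not from the repository): the file path is hypothetical, and it assumes RenderConfigId::new and MeshId::new constructors exist on the strafesnet_common id types, mirroring the other id types used elsewhere in this diff.
fn load_example()->Result<(),Box<dyn std::error::Error>>{
	// read() peeks at the first 8 bytes to pick the binary or xml decoder
	let file=std::fs::File::open("maps/example.rbxm")?;//hypothetical path
	let model=read(file)?;
	// promote the model to a place and run its scripts through roblox_emulator
	let mut place=model.into_place();
	place.run_scripts();
	// convert the dom into StrafesNET intermediate data; the closures hand out ids
	let _partial_map=convert(
		&place,
		|_texture_name|strafesnet_common::model::RenderConfigId::new(0),//assumed constructor
		|_mesh_name|strafesnet_common::model::MeshId::new(0),//assumed constructor
	);
	Ok(())
}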

View File

@ -1,210 +0,0 @@
use std::collections::HashMap;
use rbx_mesh::mesh::{Vertex2, Vertex2Truncated};
use strafesnet_common::{integer::vec3,model::{self, ColorId, IndexedVertex, NormalId, PolygonGroup, PolygonList, PositionId, TextureCoordinateId, VertexId}};
#[derive(Debug)]
pub enum Error{
Planar64Vec3(strafesnet_common::integer::Planar64TryFromFloatError),
RbxMesh(rbx_mesh::mesh::Error)
}
impl std::fmt::Display for Error{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for Error{}
fn ingest_vertices2<
AcquirePosId,
AcquireTexId,
AcquireNormalId,
AcquireColorId,
AcquireVertexId,
>(
vertices:Vec<Vertex2>,
acquire_pos_id:&mut AcquirePosId,
acquire_tex_id:&mut AcquireTexId,
acquire_normal_id:&mut AcquireNormalId,
acquire_color_id:&mut AcquireColorId,
acquire_vertex_id:&mut AcquireVertexId,
)->Result<HashMap<rbx_mesh::mesh::VertexId2,VertexId>,Error>
where
AcquirePosId:FnMut([f32;3])->Result<PositionId,Error>,
AcquireTexId:FnMut([f32;2])->TextureCoordinateId,
AcquireNormalId:FnMut([f32;3])->Result<NormalId,Error>,
AcquireColorId:FnMut([f32;4])->ColorId,
AcquireVertexId:FnMut(IndexedVertex)->VertexId,
{
//this monster is collecting a map of old_vertices_index -> unique_vertices_index
//while also inserting unique entries into the lists as it goes
Ok(vertices.into_iter().enumerate().map(|(vertex_id,vertex)|Ok((
rbx_mesh::mesh::VertexId2(vertex_id as u32),
acquire_vertex_id(IndexedVertex{
pos:acquire_pos_id(vertex.pos)?,
tex:acquire_tex_id(vertex.tex),
normal:acquire_normal_id(vertex.norm)?,
color:acquire_color_id(vertex.color.map(|f|f as f32/255.0f32))
}),
))).collect::<Result<_,_>>()?)
}
fn ingest_vertices_truncated2<
AcquirePosId,
AcquireTexId,
AcquireNormalId,
AcquireVertexId,
>(
vertices:Vec<Vertex2Truncated>,
acquire_pos_id:&mut AcquirePosId,
acquire_tex_id:&mut AcquireTexId,
acquire_normal_id:&mut AcquireNormalId,
static_color_id:ColorId,//pick one color and fill everything with it
acquire_vertex_id:&mut AcquireVertexId,
)->Result<HashMap<rbx_mesh::mesh::VertexId2,VertexId>,Error>
where
AcquirePosId:FnMut([f32;3])->Result<PositionId,Error>,
AcquireTexId:FnMut([f32;2])->TextureCoordinateId,
AcquireNormalId:FnMut([f32;3])->Result<NormalId,Error>,
AcquireVertexId:FnMut(IndexedVertex)->VertexId,
{
//this monster is collecting a map of old_vertices_index -> unique_vertices_index
//while also inserting unique entries into the lists as it goes
Ok(vertices.into_iter().enumerate().map(|(vertex_id,vertex)|Ok((
rbx_mesh::mesh::VertexId2(vertex_id as u32),
acquire_vertex_id(IndexedVertex{
pos:acquire_pos_id(vertex.pos)?,
tex:acquire_tex_id(vertex.tex),
normal:acquire_normal_id(vertex.norm)?,
color:static_color_id
}),
))).collect::<Result<_,_>>()?)
}
fn ingest_faces2_lods3(
polygon_groups:&mut Vec<PolygonGroup>,
vertex_id_map:&HashMap<rbx_mesh::mesh::VertexId2,VertexId>,
faces:&Vec<rbx_mesh::mesh::Face2>,
lods:&Vec<rbx_mesh::mesh::Lod3>
){
//faces have to be split into polygon groups based on lod
polygon_groups.extend(lods.windows(2).map(|lod_pair|
PolygonGroup::PolygonList(PolygonList::new(faces[lod_pair[0].0 as usize..lod_pair[1].0 as usize].iter().map(|face|
vec![vertex_id_map[&face.0],vertex_id_map[&face.1],vertex_id_map[&face.2]]
).collect()))
))
}
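As a concrete illustration of the windows(2) split (sketch, not from the source): each consecutive pair of lod offsets selects one contiguous face range, producing one polygon group per level of detail.
fn lod_split_example(){
	// hypothetical lod offsets into a face list of length 150
	let lods=[0usize,100,150];
	let ranges:Vec<_>=lods.windows(2).map(|pair|pair[0]..pair[1]).collect();
	assert_eq!(ranges,vec![0..100,100..150]);
}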
pub fn convert(roblox_mesh_bytes:crate::data::RobloxMeshBytes)->Result<model::Mesh,Error>{
//generate that mesh boi
let mut unique_pos=Vec::new();
let mut pos_id_from=HashMap::new();
let mut unique_tex=Vec::new();
let mut tex_id_from=HashMap::new();
let mut unique_normal=Vec::new();
let mut normal_id_from=HashMap::new();
let mut unique_color=Vec::new();
let mut color_id_from=HashMap::new();
let mut unique_vertices=Vec::new();
let mut vertex_id_from=HashMap::new();
let mut polygon_groups=Vec::new();
let mut acquire_pos_id=|pos|{
let p=vec3::try_from_f32_array(pos).map_err(Error::Planar64Vec3)?;
Ok(PositionId::new(*pos_id_from.entry(p).or_insert_with(||{
let pos_id=unique_pos.len();
unique_pos.push(p);
pos_id
}) as u32))
};
let mut acquire_tex_id=|tex|{
let h=bytemuck::cast::<[f32;2],[u32;2]>(tex);
TextureCoordinateId::new(*tex_id_from.entry(h).or_insert_with(||{
let tex_id=unique_tex.len();
unique_tex.push(glam::Vec2::from_array(tex));
tex_id
}) as u32)
};
let mut acquire_normal_id=|normal|{
let n=vec3::try_from_f32_array(normal).map_err(Error::Planar64Vec3)?;
Ok(NormalId::new(*normal_id_from.entry(n).or_insert_with(||{
let normal_id=unique_normal.len();
unique_normal.push(n);
normal_id
}) as u32))
};
let mut acquire_color_id=|color|{
let h=bytemuck::cast::<[f32;4],[u32;4]>(color);
ColorId::new(*color_id_from.entry(h).or_insert_with(||{
let color_id=unique_color.len();
unique_color.push(glam::Vec4::from_array(color));
color_id
}) as u32)
};
let mut acquire_vertex_id=|vertex:IndexedVertex|{
VertexId::new(*vertex_id_from.entry(vertex.clone()).or_insert_with(||{
let vertex_id=unique_vertices.len();
unique_vertices.push(vertex);
vertex_id
}) as u32)
};
match rbx_mesh::read_versioned(roblox_mesh_bytes.cursor()).map_err(Error::RbxMesh)?{
rbx_mesh::mesh::VersionedMesh::Version1(mesh)=>{
let color_id=acquire_color_id([1.0f32;4]);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(mesh.vertices.chunks_exact(3).map(|trip|{
let mut ingest_vertex1=|vertex:&rbx_mesh::mesh::Vertex1|Ok(acquire_vertex_id(IndexedVertex{
pos:acquire_pos_id(vertex.pos)?,
tex:acquire_tex_id([vertex.tex[0],vertex.tex[1]]),
normal:acquire_normal_id(vertex.norm)?,
color:color_id,
}));
Ok(vec![ingest_vertex1(&trip[0])?,ingest_vertex1(&trip[1])?,ingest_vertex1(&trip[2])?])
}).collect::<Result<_,_>>()?)));
},
rbx_mesh::mesh::VersionedMesh::Version2(mesh)=>{
let vertex_id_map=match mesh.header.sizeof_vertex{
rbx_mesh::mesh::SizeOfVertex2::Truncated=>{
//pick white and make all the vertices white
let color_id=acquire_color_id([1.0f32;4]);
ingest_vertices_truncated2(mesh.vertices_truncated,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,color_id,&mut acquire_vertex_id)
},
rbx_mesh::mesh::SizeOfVertex2::Full=>ingest_vertices2(mesh.vertices,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,&mut acquire_color_id,&mut acquire_vertex_id),
}?;
//one big happy group for all the faces
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(mesh.faces.into_iter().map(|face|
vec![vertex_id_map[&face.0],vertex_id_map[&face.1],vertex_id_map[&face.2]]
).collect())));
},
rbx_mesh::mesh::VersionedMesh::Version3(mesh)=>{
let vertex_id_map=match mesh.header.sizeof_vertex{
rbx_mesh::mesh::SizeOfVertex2::Truncated=>{
let color_id=acquire_color_id([1.0f32;4]);
ingest_vertices_truncated2(mesh.vertices_truncated,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,color_id,&mut acquire_vertex_id)
},
rbx_mesh::mesh::SizeOfVertex2::Full=>ingest_vertices2(mesh.vertices,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,&mut acquire_color_id,&mut acquire_vertex_id),
}?;
ingest_faces2_lods3(&mut polygon_groups,&vertex_id_map,&mesh.faces,&mesh.lods);
},
rbx_mesh::mesh::VersionedMesh::Version4(mesh)=>{
let vertex_id_map=ingest_vertices2(
mesh.vertices,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,&mut acquire_color_id,&mut acquire_vertex_id
)?;
ingest_faces2_lods3(&mut polygon_groups,&vertex_id_map,&mesh.faces,&mesh.lods);
},
rbx_mesh::mesh::VersionedMesh::Version5(mesh)=>{
let vertex_id_map=ingest_vertices2(
mesh.vertices,&mut acquire_pos_id,&mut acquire_tex_id,&mut acquire_normal_id,&mut acquire_color_id,&mut acquire_vertex_id
)?;
ingest_faces2_lods3(&mut polygon_groups,&vertex_id_map,&mesh.faces,&mesh.lods);
},
}
Ok(model::Mesh{
unique_pos,
unique_normal,
unique_tex,
unique_color,
unique_vertices,
polygon_groups,
//these should probably be moved to the model...
graphics_groups:Vec::new(),
physics_groups:Vec::new(),
})
}

View File

@ -1,510 +0,0 @@
use strafesnet_common::model::{Color4,TextureCoordinate,Mesh,IndexedGraphicsGroup,IndexedPhysicsGroup,IndexedVertex,PolygonGroupId,PolygonGroup,PolygonList,IndexedVertexList,PositionId,TextureCoordinateId,NormalId,ColorId,VertexId,RenderConfigId};
use strafesnet_common::integer::{vec3,Planar64Vec3};
#[derive(Debug)]
pub enum Primitives{
Sphere,
Cube,
Cylinder,
Wedge,
CornerWedge,
}
#[derive(Hash,PartialEq,Eq)]
pub enum CubeFace{
Right,
Top,
Back,
Left,
Bottom,
Front,
}
const CUBE_DEFAULT_TEXTURE_COORDS:[TextureCoordinate;4]=[
TextureCoordinate::new(0.0,0.0),
TextureCoordinate::new(1.0,0.0),
TextureCoordinate::new(1.0,1.0),
TextureCoordinate::new(0.0,1.0),
];
const CUBE_DEFAULT_VERTICES:[Planar64Vec3;8]=[
vec3::int(-1,-1, 1),//0 left bottom back
vec3::int( 1,-1, 1),//1 right bottom back
vec3::int( 1, 1, 1),//2 right top back
vec3::int(-1, 1, 1),//3 left top back
vec3::int(-1, 1,-1),//4 left top front
vec3::int( 1, 1,-1),//5 right top front
vec3::int( 1,-1,-1),//6 right bottom front
vec3::int(-1,-1,-1),//7 left bottom front
];
const CUBE_DEFAULT_NORMALS:[Planar64Vec3;6]=[
vec3::int( 1, 0, 0),//CubeFace::Right
vec3::int( 0, 1, 0),//CubeFace::Top
vec3::int( 0, 0, 1),//CubeFace::Back
vec3::int(-1, 0, 0),//CubeFace::Left
vec3::int( 0,-1, 0),//CubeFace::Bottom
vec3::int( 0, 0,-1),//CubeFace::Front
];
const CUBE_DEFAULT_POLYS:[[[u32;3];4];6]=[
// right (1, 0, 0)
[
[6,2,0],//[vertex,tex,norm]
[5,1,0],
[2,0,0],
[1,3,0],
],
// top (0, 1, 0)
[
[5,3,1],
[4,2,1],
[3,1,1],
[2,0,1],
],
// back (0, 0, 1)
[
[0,3,2],
[1,2,2],
[2,1,2],
[3,0,2],
],
// left (-1, 0, 0)
[
[0,2,3],
[3,1,3],
[4,0,3],
[7,3,3],
],
// bottom (0,-1, 0)
[
[1,1,4],
[0,0,4],
[7,3,4],
[6,2,4],
],
// front (0, 0,-1)
[
[4,1,5],
[5,0,5],
[6,3,5],
[7,2,5],
],
];
#[derive(Hash,PartialEq,Eq)]
pub enum WedgeFace{
Right,
TopFront,
Back,
Left,
Bottom,
}
const WEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//Wedge::Right
vec3::int( 0, 1,-1),//Wedge::TopFront
vec3::int( 0, 0, 1),//Wedge::Back
vec3::int(-1, 0, 0),//Wedge::Left
vec3::int( 0,-1, 0),//Wedge::Bottom
];
/*
local cornerWedgeVerticies = {
Vector3.new(-1/2,-1/2,-1/2),7
Vector3.new(-1/2,-1/2, 1/2),0
Vector3.new( 1/2,-1/2,-1/2),6
Vector3.new( 1/2,-1/2, 1/2),1
Vector3.new( 1/2, 1/2,-1/2),5
}
*/
#[derive(Hash,PartialEq,Eq)]
pub enum CornerWedgeFace{
Right,
TopBack,
TopLeft,
Bottom,
Front,
}
const CORNERWEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//CornerWedge::Right
vec3::int( 0, 1, 1),//CornerWedge::TopBack
vec3::int(-1, 1, 0),//CornerWedge::TopLeft
vec3::int( 0,-1, 0),//CornerWedge::Bottom
vec3::int( 0, 0,-1),//CornerWedge::Front
];
pub fn unit_sphere(render:RenderConfigId)->Mesh{
unit_cube(render)
}
#[derive(Default)]
pub struct CubeFaceDescription([Option<FaceDescription>;6]);
impl CubeFaceDescription{
pub fn insert(&mut self,index:CubeFace,value:FaceDescription){
self.0[index as usize]=Some(value);
}
pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,6>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}
}
pub fn unit_cube(render:RenderConfigId)->Mesh{
let mut t=CubeFaceDescription::default();
t.insert(CubeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Top,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Back,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Left,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Bottom,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Front,FaceDescription::new_with_render_id(render));
generate_partial_unit_cube(t)
}
pub fn unit_cylinder(render:RenderConfigId)->Mesh{
//lmao
unit_cube(render)
}
#[derive(Default)]
pub struct WedgeFaceDescription([Option<FaceDescription>;5]);
impl WedgeFaceDescription{
pub fn insert(&mut self,index:WedgeFace,value:FaceDescription){
self.0[index as usize]=Some(value);
}
pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,5>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}
}
pub fn unit_wedge(render:RenderConfigId)->Mesh{
let mut t=WedgeFaceDescription::default();
t.insert(WedgeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::TopFront,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Back,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Left,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Bottom,FaceDescription::new_with_render_id(render));
generate_partial_unit_wedge(t)
}
#[derive(Default)]
pub struct CornerWedgeFaceDescription([Option<FaceDescription>;5]);
impl CornerWedgeFaceDescription{
pub fn insert(&mut self,index:CornerWedgeFace,value:FaceDescription){
self.0[index as usize]=Some(value);
}
pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,5>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}
}
pub fn unit_cornerwedge(render:RenderConfigId)->Mesh{
let mut t=CornerWedgeFaceDescription::default();
t.insert(CornerWedgeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::TopBack,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::TopLeft,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::Bottom,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::Front,FaceDescription::new_with_render_id(render));
generate_partial_unit_cornerwedge(t)
}
#[derive(Clone)]
pub struct FaceDescription{
pub render:RenderConfigId,
pub transform:glam::Affine2,
pub color:Color4,
}
impl FaceDescription{
pub fn new_with_render_id(render:RenderConfigId)->Self {
Self{
render,
transform:glam::Affine2::IDENTITY,
color:Color4::new(1.0,1.0,1.0,0.0),//zero alpha to hide the default texture
}
}
}
pub fn generate_partial_unit_cube(face_descriptions:CubeFaceDescription)->Mesh{
let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new();
let mut generated_color=Vec::new();
let mut generated_vertices=Vec::new();
let mut polygon_groups=Vec::new();
let mut graphics_groups=Vec::new();
let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index
}else{
//create new transform_index
let transform_index=transforms.len();
transforms.push(face_description.transform);
generated_tex.extend(CUBE_DEFAULT_TEXTURE_COORDS.map(|tex|
face_description.transform.transform_point2(tex)
));
transform_index
} as u32;
let color_index=if let Some(color_index)=generated_color.iter().position(|&color|color==face_description.color){
color_index
}else{
//create new color_index
let color_index=generated_color.len();
generated_color.push(face_description.color);
color_index
} as u32;
//always push normal
let normal_index=generated_normal.len() as u32;
generated_normal.push(CUBE_DEFAULT_NORMALS[face_id]);
//push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
CUBE_DEFAULT_POLYS[face_id].map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index
}else{
//create new pos_index
let pos_index=generated_pos.len();
generated_pos.push(pos);
pos_index
} as u32;
//always push vertex
let vertex=IndexedVertex{
pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index),
color:ColorId::new(color_index),
};
let vert_index=generated_vertices.len();
generated_vertices.push(vertex);
VertexId::new(vert_index as u32)
}).to_vec(),
])));
graphics_groups.push(IndexedGraphicsGroup{
render:face_description.render,
groups:vec![group_id],
});
physics_group.groups.push(group_id);
}
Mesh{
unique_pos:generated_pos,
unique_tex:generated_tex,
unique_normal:generated_normal,
unique_color:generated_color,
unique_vertices:generated_vertices,
polygon_groups,
graphics_groups,
physics_groups:vec![physics_group],
}
}
//don't think too hard about the copy paste because this is all going into the map tool eventually...
pub fn generate_partial_unit_wedge(face_descriptions:WedgeFaceDescription)->Mesh{
let wedge_default_polys=[
// right (1, 0, 0)
vec![
[6,2,0],//[vertex,tex,norm]
[2,0,0],
[1,3,0],
],
// FrontTop (0, 1, -1)
vec![
[3,1,1],
[2,0,1],
[6,3,1],
[7,2,1],
],
// back (0, 0, 1)
vec![
[0,3,2],
[1,2,2],
[2,1,2],
[3,0,2],
],
// left (-1, 0, 0)
vec![
[0,2,3],
[3,1,3],
[7,3,3],
],
// bottom (0,-1, 0)
vec![
[1,1,4],
[0,0,4],
[7,3,4],
[6,2,4],
],
];
let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new();
let mut generated_color=Vec::new();
let mut generated_vertices=Vec::new();
let mut polygon_groups=Vec::new();
let mut graphics_groups=Vec::new();
let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index
}else{
//create new transform_index
let transform_index=transforms.len();
transforms.push(face_description.transform);
generated_tex.extend(CUBE_DEFAULT_TEXTURE_COORDS.map(|tex|
face_description.transform.transform_point2(tex)
));
transform_index
} as u32;
let color_index=if let Some(color_index)=generated_color.iter().position(|&color|color==face_description.color){
color_index
}else{
//create new color_index
let color_index=generated_color.len();
generated_color.push(face_description.color);
color_index
} as u32;
//always push normal
let normal_index=generated_normal.len() as u32;
generated_normal.push(WEDGE_DEFAULT_NORMALS[face_id]);
//push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
wedge_default_polys[face_id].iter().map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index
}else{
//create new pos_index
let pos_index=generated_pos.len();
generated_pos.push(pos);
pos_index
} as u32;
//always push vertex
let vertex=IndexedVertex{
pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index),
color:ColorId::new(color_index),
};
let vert_index=generated_vertices.len();
generated_vertices.push(vertex);
VertexId::new(vert_index as u32)
}).collect()
])));
graphics_groups.push(IndexedGraphicsGroup{
render:face_description.render,
groups:vec![group_id],
});
physics_group.groups.push(group_id);
}
Mesh{
unique_pos:generated_pos,
unique_tex:generated_tex,
unique_normal:generated_normal,
unique_color:generated_color,
unique_vertices:generated_vertices,
polygon_groups,
graphics_groups,
physics_groups:vec![physics_group],
}
}
pub fn generate_partial_unit_cornerwedge(face_descriptions:CornerWedgeFaceDescription)->Mesh{
let cornerwedge_default_polys=[
// right (1, 0, 0)
vec![
[6,2,0],//[vertex,tex,norm]
[5,1,0],
[1,3,0],
],
// BackTop (0, 1, 1)
vec![
[5,3,1],
[0,1,1],
[1,0,1],
],
// LeftTop (-1, 1, 0)
vec![
[5,3,2],
[7,2,2],
[0,1,2],
],
// bottom (0,-1, 0)
vec![
[1,1,3],
[0,0,3],
[7,3,3],
[6,2,3],
],
// front (0, 0,-1)
vec![
[5,0,4],
[6,3,4],
[7,2,4],
],
];
let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new();
let mut generated_color=Vec::new();
let mut generated_vertices=Vec::new();
let mut polygon_groups=Vec::new();
let mut graphics_groups=Vec::new();
let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index
}else{
//create new transform_index
let transform_index=transforms.len();
transforms.push(face_description.transform);
generated_tex.extend(CUBE_DEFAULT_TEXTURE_COORDS.map(|tex|
face_description.transform.transform_point2(tex)
));
transform_index
} as u32;
let color_index=if let Some(color_index)=generated_color.iter().position(|&color|color==face_description.color){
color_index
}else{
//create new color_index
let color_index=generated_color.len();
generated_color.push(face_description.color);
color_index
} as u32;
//always push normal
let normal_index=generated_normal.len() as u32;
generated_normal.push(CORNERWEDGE_DEFAULT_NORMALS[face_id]);
//push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
cornerwedge_default_polys[face_id].iter().map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index
}else{
//create new pos_index
let pos_index=generated_pos.len();
generated_pos.push(pos);
pos_index
} as u32;
//always push vertex
let vertex=IndexedVertex{
pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index),
color:ColorId::new(color_index),
};
let vert_index=generated_vertices.len();
generated_vertices.push(vertex);
VertexId::new(vert_index as u32)
}).collect(),
])));
graphics_groups.push(IndexedGraphicsGroup{
render:face_description.render,
groups:vec![group_id],
});
physics_group.groups.push(group_id);
}
Mesh{
unique_pos:generated_pos,
unique_tex:generated_tex,
unique_normal:generated_normal,
unique_color:generated_color,
unique_vertices:generated_vertices,
polygon_groups,
graphics_groups,
physics_groups:vec![physics_group],
}
}

View File

@ -1,909 +0,0 @@
use std::collections::HashMap;
use crate::primitives;
use strafesnet_common::map;
use strafesnet_common::model;
use strafesnet_common::gameplay_modes;
use strafesnet_common::gameplay_style;
use strafesnet_common::gameplay_attributes as attr;
use strafesnet_common::integer::{self,vec3,Planar64,Planar64Vec3,Planar64Mat3,Planar64Affine3};
use strafesnet_common::model::RenderConfigId;
use strafesnet_common::updatable::Updatable;
fn class_is_a(class: &str, superclass: &str) -> bool {
if class==superclass {
return true
}
let class_descriptor=rbx_reflection_database::get().classes.get(class);
if let Some(descriptor) = &class_descriptor {
if let Some(class_super) = &descriptor.superclass {
return class_is_a(&class_super, superclass)
}
}
false
}
fn recursive_collect_superclass(objects: &mut std::vec::Vec<rbx_dom_weak::types::Ref>,dom: &rbx_dom_weak::WeakDom, instance: &rbx_dom_weak::Instance, superclass: &str){
let mut stack=vec![instance];
while let Some(item)=stack.pop(){
for &referent in item.children(){
if let Some(c)=dom.get_by_ref(referent){
if class_is_a(c.class.as_str(),superclass){
objects.push(c.referent());//copy ref
}
stack.push(c);
}
}
}
}
fn planar64_affine3_from_roblox(cf:&rbx_dom_weak::types::CFrame,size:&rbx_dom_weak::types::Vector3)->Planar64Affine3{
Planar64Affine3::new(
Planar64Mat3::from_cols([
vec3::try_from_f32_array([cf.orientation.x.x,cf.orientation.y.x,cf.orientation.z.x]).unwrap()
*integer::try_from_f32(size.x/2.0).unwrap(),
vec3::try_from_f32_array([cf.orientation.x.y,cf.orientation.y.y,cf.orientation.z.y]).unwrap()
*integer::try_from_f32(size.y/2.0).unwrap(),
vec3::try_from_f32_array([cf.orientation.x.z,cf.orientation.y.z,cf.orientation.z.z]).unwrap()
*integer::try_from_f32(size.z/2.0).unwrap(),
].map(|t|t.fix_1())),
vec3::try_from_f32_array([cf.position.x,cf.position.y,cf.position.z]).unwrap()
)
}
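The same construction in plain floating point, as a hedged sketch using glam (already a dependency in this diff): scaling the orientation columns by half the part size makes the affine map the unit cube with corners at ±1 onto the part's oriented box.
fn half_extent_affine_example(){
	// identity orientation, size (4,2,2), centered at the origin (hypothetical part)
	let affine=glam::Affine3A::from_mat3_translation(
		glam::Mat3::from_diagonal(glam::Vec3::new(4.0,2.0,2.0)/2.0),
		glam::Vec3::ZERO,
	);
	// the unit-cube corner (1,1,1) lands on the part corner (2,1,1)
	assert_eq!(affine.transform_point3(glam::Vec3::ONE),glam::Vec3::new(2.0,1.0,1.0));
}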
struct ModeBuilder{
mode:gameplay_modes::Mode,
final_stage_id_from_builder_stage_id:HashMap<gameplay_modes::StageId,gameplay_modes::StageId>,
}
#[derive(Default)]
struct ModesBuilder{
modes:HashMap<gameplay_modes::ModeId,gameplay_modes::Mode>,
stages:HashMap<gameplay_modes::ModeId,HashMap<gameplay_modes::StageId,gameplay_modes::Stage>>,
mode_updates:Vec<(gameplay_modes::ModeId,gameplay_modes::ModeUpdate)>,
stage_updates:Vec<(gameplay_modes::ModeId,gameplay_modes::StageId,gameplay_modes::StageUpdate)>,
}
impl ModesBuilder{
fn build(mut self)->gameplay_modes::Modes{
//collect modes and stages into contiguous arrays
let mut unique_modes:Vec<(gameplay_modes::ModeId,gameplay_modes::Mode)>
=self.modes.into_iter().collect();
unique_modes.sort_by_key(|&(mode_id,_)|mode_id);
let (mut modes,final_mode_id_from_builder_mode_id):(Vec<ModeBuilder>,HashMap<gameplay_modes::ModeId,gameplay_modes::ModeId>)
=unique_modes.into_iter().enumerate()
.map(|(final_mode_id,(builder_mode_id,mut mode))|{
(
ModeBuilder{
final_stage_id_from_builder_stage_id:self.stages.remove(&builder_mode_id).map_or_else(||HashMap::new(),|stages|{
let mut unique_stages:Vec<(gameplay_modes::StageId,gameplay_modes::Stage)>
=stages.into_iter().collect();
unique_stages.sort_by(|a,b|a.0.cmp(&b.0));
unique_stages.into_iter().enumerate()
.map(|(final_stage_id,(builder_stage_id,stage))|{
mode.push_stage(stage);
(builder_stage_id,gameplay_modes::StageId::new(final_stage_id as u32))
}).collect()
}),
mode,
},
(
builder_mode_id,
gameplay_modes::ModeId::new(final_mode_id as u32)
)
)
}).unzip();
//TODO: failure messages or errors or something
//push stage updates
for (builder_mode_id,builder_stage_id,stage_update) in self.stage_updates{
if let Some(final_mode_id)=final_mode_id_from_builder_mode_id.get(&builder_mode_id){
if let Some(mode)=modes.get_mut(final_mode_id.get() as usize){
if let Some(&final_stage_id)=mode.final_stage_id_from_builder_stage_id.get(&builder_stage_id){
if let Some(stage)=mode.mode.get_stage_mut(final_stage_id){
stage.update(stage_update);
}
}
}
}
}
//push mode updates
for (builder_mode_id,mut mode_update) in self.mode_updates{
if let Some(final_mode_id)=final_mode_id_from_builder_mode_id.get(&builder_mode_id){
if let Some(mode)=modes.get_mut(final_mode_id.get() as usize){
//map stage id on stage elements
mode_update.map_stage_element_ids(|stage_id|
//walk down one stage id at a time until a stage is found
//TODO use better logic like BTreeMap::upper_bound instead of walking
// final_stage_id_from_builder_stage_id.upper_bound(Bound::Included(&stage_id))
// .value().copied().unwrap_or(gameplay_modes::StageId::FIRST)
(0..=stage_id.get()).rev().find_map(|builder_stage_id|
//map the stage element to that stage
mode.final_stage_id_from_builder_stage_id.get(&gameplay_modes::StageId::new(builder_stage_id)).copied()
).unwrap_or(gameplay_modes::StageId::FIRST)
);
mode.mode.update(mode_update);
}
}
}
gameplay_modes::Modes::new(modes.into_iter().map(|mode_builder|mode_builder.mode).collect())
}
fn insert_mode(&mut self,mode_id:gameplay_modes::ModeId,mode:gameplay_modes::Mode){
assert!(self.modes.insert(mode_id,mode).is_none(),"Cannot replace existing mode");
}
fn insert_stage(&mut self,mode_id:gameplay_modes::ModeId,stage_id:gameplay_modes::StageId,stage:gameplay_modes::Stage){
assert!(self.stages.entry(mode_id).or_insert(HashMap::new()).insert(stage_id,stage).is_none(),"Cannot replace existing stage");
}
fn push_mode_update(&mut self,mode_id:gameplay_modes::ModeId,mode_update:gameplay_modes::ModeUpdate){
self.mode_updates.push((mode_id,mode_update));
}
fn push_stage_update(&mut self,mode_id:gameplay_modes::ModeId,stage_id:gameplay_modes::StageId,stage_update:gameplay_modes::StageUpdate){
self.stage_updates.push((mode_id,stage_id,stage_update));
}
}
fn get_attributes(name:&str,can_collide:bool,velocity:Planar64Vec3,model_id:model::ModelId,modes_builder:&mut ModesBuilder,wormhole_in_model_to_id:&mut HashMap<model::ModelId,u32>,wormhole_id_to_out_model:&mut HashMap<u32,model::ModelId>)->attr::CollisionAttributes{
let mut general=attr::GeneralAttributes::default();
let mut intersecting=attr::IntersectingAttributes::default();
let mut contacting=attr::ContactingAttributes::default();
let mut force_can_collide=can_collide;
let mut force_intersecting=false;
match name{
"Water"=>{
force_can_collide=false;
//TODO: read stupid CustomPhysicalProperties
intersecting.water=Some(attr::IntersectingWater{density:Planar64::ONE,viscosity:Planar64::ONE/10,velocity});
},
"Accelerator"=>{
//although the new game supports collidable accelerators, this is a roblox compatibility map loader
force_can_collide=false;
general.accelerator=Some(attr::Accelerator{acceleration:velocity});
},
// "UnorderedCheckpoint"=>general.teleport_behaviour=Some(model::TeleportBehaviour::StageElement(attr::StageElement{
// mode_id:0,
// stage_id:0,
// force:false,
// behaviour:model::StageElementBehaviour::Unordered
// })),
"SetVelocity"=>general.trajectory=Some(attr::SetTrajectory::Velocity(velocity)),
"MapStart"=>{
force_can_collide=false;
force_intersecting=true;
modes_builder.insert_mode(
gameplay_modes::ModeId::MAIN,
gameplay_modes::Mode::empty(
gameplay_style::StyleModifiers::roblox_bhop(),
model_id
)
);
},
"MapFinish"=>{
force_can_collide=false;
force_intersecting=true;
modes_builder.push_mode_update(
gameplay_modes::ModeId::MAIN,
gameplay_modes::ModeUpdate::zone(
model_id,
gameplay_modes::Zone::Finish,
),
);
},
"MapAnticheat"=>{
force_can_collide=false;
force_intersecting=true;
modes_builder.push_mode_update(
gameplay_modes::ModeId::MAIN,
gameplay_modes::ModeUpdate::zone(
model_id,
gameplay_modes::Zone::Anticheat,
),
);
},
"Platform"=>{
modes_builder.push_mode_update(
gameplay_modes::ModeId::MAIN,
gameplay_modes::ModeUpdate::element(
model_id,
gameplay_modes::StageElement::new(gameplay_modes::StageId::FIRST,false,gameplay_modes::StageElementBehaviour::Platform,None),//roblox does not know which stage the platform belongs to
),
);
},
other=>{
let regman=lazy_regex::regex!(r"^(BonusStart|WormholeOut)(\d+)$");
if let Some(captures)=regman.captures(other){
match &captures[1]{
"BonusStart"=>{
force_can_collide=false;
force_intersecting=true;
modes_builder.insert_mode(
gameplay_modes::ModeId::new(captures[2].parse::<u32>().unwrap()),
gameplay_modes::Mode::empty(
gameplay_style::StyleModifiers::roblox_bhop(),
model_id
)
);
},
"WormholeOut"=>{
//the PhysicsModelId has to exist for it to be teleported to!
force_intersecting=true;
//this object is not special in strafe client, but the roblox mapping needs to be converted to model id
assert!(wormhole_id_to_out_model.insert(captures[2].parse::<u32>().unwrap(),model_id).is_none(),"Cannot have multiple WormholeOut with same id");
},
_=>(),
}
}else if let Some(captures)=lazy_regex::regex!(r"^(Force)?(Spawn|SpawnAt|Trigger|Teleport|Platform)(\d+)$")
.captures(other){
force_intersecting=true;
let stage_id=gameplay_modes::StageId::new(captures[3].parse::<u32>().unwrap());
let stage_element=gameplay_modes::StageElement::new(
//stage_id:
stage_id,
//force:
match captures.get(1){
Some(m)=>m.as_str()=="Force",
None=>false,
},
//behaviour:
match &captures[2]{
"Spawn"=>{
modes_builder.insert_stage(
gameplay_modes::ModeId::MAIN,
stage_id,
gameplay_modes::Stage::empty(model_id),
);
//TODO: let denormalize handle this
gameplay_modes::StageElementBehaviour::SpawnAt
},
"SpawnAt"=>gameplay_modes::StageElementBehaviour::SpawnAt,
//cancollide false so you don't hit the side
//NOT a decoration
"Trigger"=>{force_can_collide=false;gameplay_modes::StageElementBehaviour::Trigger},
"Teleport"=>{force_can_collide=false;gameplay_modes::StageElementBehaviour::Teleport},
"Platform"=>gameplay_modes::StageElementBehaviour::Platform,
_=>panic!("regex1[2] messed up bad"),
},
None
);
modes_builder.push_mode_update(
gameplay_modes::ModeId::MAIN,
gameplay_modes::ModeUpdate::element(
model_id,
stage_element,
),
);
}else if let Some(captures)=lazy_regex::regex!(r"^(Jump|WormholeIn)(\d+)$")
.captures(other){
match &captures[1]{
"Jump"=>modes_builder.push_mode_update(
gameplay_modes::ModeId::MAIN,
gameplay_modes::ModeUpdate::element(
model_id,
//jump_limit:
gameplay_modes::StageElement::new(
gameplay_modes::StageId::FIRST,
false,
gameplay_modes::StageElementBehaviour::Check,
Some(captures[2].parse::<u8>().unwrap())
)
),
),
"WormholeIn"=>{
force_can_collide=false;
force_intersecting=true;
assert!(wormhole_in_model_to_id.insert(model_id,captures[2].parse::<u32>().unwrap()).is_none(),"Impossible");
},
_=>panic!("regex2[1] messed up bad"),
}
}else if let Some(captures)=lazy_regex::regex!(r"^Bonus(Finish|Anticheat)(\d+)$")
.captures(other){
force_can_collide=false;
force_intersecting=true;
modes_builder.push_mode_update(
gameplay_modes::ModeId::new(captures[2].parse::<u32>().unwrap()),
gameplay_modes::ModeUpdate::zone(
model_id,
//zone:
match &captures[1]{
"Finish"=>gameplay_modes::Zone::Finish,
"Anticheat"=>gameplay_modes::Zone::Anticheat,
_=>panic!("regex3[1] messed up bad"),
},
),
);
}
// else if let Some(captures)=lazy_regex::regex!(r"^Stage(\d+)OrderedCheckpoint(\d+)$")
// .captures(other){
// match &captures[1]{
// "OrderedCheckpoint"=>modes_builder.push_stage_update(
// gameplay_modes::ModeId::MAIN,
// gameplay_modes::StageId::new(0),
// gameplay_modes::StageUpdate::ordered_checkpoint(captures[2].parse::<u32>().unwrap()),
// ),
// _=>panic!("regex3[1] messed up bad"),
// }
// }
}
}
//need some way to skip this
if velocity!=vec3::ZERO{
general.booster=Some(attr::Booster::Velocity(velocity));
}
match force_can_collide{
true=>{
match name{
"Bounce"=>contacting.contact_behaviour=Some(attr::ContactingBehaviour::Elastic(u32::MAX)),
"Surf"=>contacting.contact_behaviour=Some(attr::ContactingBehaviour::Surf),
"Ladder"=>contacting.contact_behaviour=Some(attr::ContactingBehaviour::Ladder(attr::ContactingLadder{sticky:true})),
_=>(),
}
attr::CollisionAttributes::Contact(attr::ContactAttributes{contacting,general})
},
false=>if force_intersecting
||general.any()
||intersecting.any()
{
attr::CollisionAttributes::Intersect(attr::IntersectAttributes{intersecting,general})
}else{
attr::CollisionAttributes::Decoration
},
}
}
#[derive(Clone,Copy,PartialEq)]
struct RobloxTextureTransform{
offset_u:f32,
offset_v:f32,
scale_u:f32,
scale_v:f32,
}
impl std::cmp::Eq for RobloxTextureTransform{}//f32 has no Eq; asserted manually so this can be used inside a HashMap key
impl std::default::Default for RobloxTextureTransform{
fn default()->Self{
Self{offset_u:0.0,offset_v:0.0,scale_u:1.0,scale_v:1.0}
}
}
impl std::hash::Hash for RobloxTextureTransform{
fn hash<H:std::hash::Hasher>(&self,state:&mut H) {
self.offset_u.to_ne_bytes().hash(state);
self.offset_v.to_ne_bytes().hash(state);
self.scale_u.to_ne_bytes().hash(state);
self.scale_v.to_ne_bytes().hash(state);
}
}
#[derive(Clone,PartialEq)]
struct RobloxFaceTextureDescription{
render:RenderConfigId,
color:glam::Vec4,
transform:RobloxTextureTransform,
}
impl std::cmp::Eq for RobloxFaceTextureDescription{}//f32 color components block a derived Eq; asserted manually for HashMap keying
impl std::hash::Hash for RobloxFaceTextureDescription{
fn hash<H:std::hash::Hasher>(&self,state:&mut H){
self.render.hash(state);
self.transform.hash(state);
for &el in self.color.as_ref().iter(){
el.to_ne_bytes().hash(state);
}
}
}
impl RobloxFaceTextureDescription{
fn to_face_description(&self)->primitives::FaceDescription{
primitives::FaceDescription{
render:self.render,
transform:glam::Affine2::from_translation(
glam::vec2(self.transform.offset_u,self.transform.offset_v)
)
*glam::Affine2::from_scale(
glam::vec2(self.transform.scale_u,self.transform.scale_v)
),
color:self.color,
}
}
}
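//face arrays are indexed in Roblox NormalId order: Right,Top,Back,Left,Bottom,Front
//(the wedge and corner wedge variants drop the faces those shapes don't have)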
type RobloxPartDescription=[Option<RobloxFaceTextureDescription>;6];
type RobloxWedgeDescription=[Option<RobloxFaceTextureDescription>;5];
type RobloxCornerWedgeDescription=[Option<RobloxFaceTextureDescription>;5];
#[derive(Clone,Eq,Hash,PartialEq)]
enum RobloxBasePartDescription{
Sphere(RobloxPartDescription),
Part(RobloxPartDescription),
Cylinder(RobloxPartDescription),
Wedge(RobloxWedgeDescription),
CornerWedge(RobloxCornerWedgeDescription),
}
enum Shape{
Primitive(primitives::Primitives),
MeshPart,
}
enum MeshAvailability{
Immediate,
Deferred(RenderConfigId),
}
struct DeferredModelDeferredAttributes{
render:RenderConfigId,
model:ModelDeferredAttributes,
}
struct ModelDeferredAttributes{
mesh:model::MeshId,
deferred_attributes:GetAttributesArgs,
color:model::Color4,//transparency is in here
transform:Planar64Affine3,
}
struct ModelOwnedAttributes{
mesh:model::MeshId,
attributes:attr::CollisionAttributes,
color:model::Color4,//transparency is in here
transform:Planar64Affine3,
}
struct GetAttributesArgs{
name:Box<str>,
can_collide:bool,
velocity:Planar64Vec3,
}
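///Converts a Roblox DOM into a PartialMap1 containing the generated primitive meshes
///and models whose MeshPart meshes and collision attributes are still deferred.
///
///Minimal usage sketch; the counter-based id allocators are hypothetical stand-ins
///for the caller's real texture/mesh managers, and `RenderConfigId::new` is assumed
///to exist like the other id constructors used in this file:
///```ignore
///let mut next_render=0u32;
///let mut next_mesh=0u32;
///let partial_map1=convert(
///	&dom,
///	|_texture:Option<&str>|{let id=model::RenderConfigId::new(next_render);next_render+=1;id},
///	|_mesh_asset:&str|{let id=model::MeshId::new(next_mesh);next_mesh+=1;id},
///);
///```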
pub fn convert<AcquireRenderConfigId,AcquireMeshId>(
dom:&rbx_dom_weak::WeakDom,
mut acquire_render_config_id:AcquireRenderConfigId,
mut acquire_mesh_id:AcquireMeshId,
)->PartialMap1
where
AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
AcquireMeshId:FnMut(&str)->model::MeshId,
{
let mut deferred_models_deferred_attributes=Vec::new();
let mut primitive_models_deferred_attributes=Vec::new();
let mut primitive_meshes=Vec::new();
let mut mesh_id_from_description=HashMap::new();
//just going to leave it like this for now instead of reworking the data structures for this whole thing
let textureless_render_group=acquire_render_config_id(None);
let mut object_refs=Vec::new();
let mut temp_objects=Vec::new();
recursive_collect_superclass(&mut object_refs, &dom, dom.root(),"BasePart");
for object_ref in object_refs {
if let Some(object)=dom.get_by_ref(object_ref){
if let (
Some(rbx_dom_weak::types::Variant::CFrame(cf)),
Some(rbx_dom_weak::types::Variant::Vector3(size)),
Some(rbx_dom_weak::types::Variant::Vector3(velocity)),
Some(rbx_dom_weak::types::Variant::Float32(transparency)),
Some(rbx_dom_weak::types::Variant::Color3uint8(color3)),
Some(rbx_dom_weak::types::Variant::Bool(can_collide)),
) = (
object.properties.get("CFrame"),
object.properties.get("Size"),
object.properties.get("Velocity"),
object.properties.get("Transparency"),
object.properties.get("Color"),
object.properties.get("CanCollide"),
)
{
let model_transform=planar64_affine3_from_roblox(cf,size);
if model_transform.matrix3.det().is_zero(){
let mut parent_ref=object.parent();
let mut full_path=object.name.clone();
while let Some(parent)=dom.get_by_ref(parent_ref){
full_path=format!("{}.{}",parent.name,full_path);
parent_ref=parent.parent();
}
println!("Zero determinant CFrame at location {}",full_path);
println!("matrix3:{}",model_transform.matrix3);
continue;
}
//at this point a new model is going to be generated for sure.
let model_id=model::ModelId::new(primitive_models_deferred_attributes.len() as u32);
//TODO: also detect "CylinderMesh" etc here
let shape=match object.class.as_str(){
"Part"=>if let Some(rbx_dom_weak::types::Variant::Enum(shape))=object.properties.get("Shape"){
Shape::Primitive(match shape.to_u32(){
0=>primitives::Primitives::Sphere,
1=>primitives::Primitives::Cube,
2=>primitives::Primitives::Cylinder,
3=>primitives::Primitives::Wedge,
4=>primitives::Primitives::CornerWedge,
other=>panic!("Funky roblox PartType={};",other),
})
}else{
panic!("Part has no Shape!");
},
"TrussPart"=>Shape::Primitive(primitives::Primitives::Cube),
"WedgePart"=>Shape::Primitive(primitives::Primitives::Wedge),
"CornerWedgePart"=>Shape::Primitive(primitives::Primitives::CornerWedge),
"MeshPart"=>Shape::MeshPart,
_=>{
println!("Unsupported BasePart ClassName={}; defaulting to cube",object.class);
Shape::Primitive(primitives::Primitives::Cube)
}
};
let (availability,mesh_id)=match shape{
Shape::Primitive(primitive_shape)=>{
//TODO: TAB TAB
//use the biggest one and cut it down later...
let mut part_texture_description:RobloxPartDescription=[None,None,None,None,None,None];
temp_objects.clear();
recursive_collect_superclass(&mut temp_objects, &dom, object,"Decal");
for &decal_ref in &temp_objects{
if let Some(decal)=dom.get_by_ref(decal_ref){
if let (
Some(rbx_dom_weak::types::Variant::Content(content)),
Some(rbx_dom_weak::types::Variant::Enum(normalid)),
Some(rbx_dom_weak::types::Variant::Color3(decal_color3)),
Some(rbx_dom_weak::types::Variant::Float32(decal_transparency)),
) = (
decal.properties.get("Texture"),
decal.properties.get("Face"),
decal.properties.get("Color3"),
decal.properties.get("Transparency"),
) {
let render_id=acquire_render_config_id(Some(content.as_ref()));
let normal_id=normalid.to_u32();
if normal_id<6{
let (roblox_texture_color,roblox_texture_transform)=if decal.class=="Texture"{
//generate the texture transform from the Texture tiling properties
if let (
Some(rbx_dom_weak::types::Variant::Float32(ox)),
Some(rbx_dom_weak::types::Variant::Float32(oy)),
Some(rbx_dom_weak::types::Variant::Float32(sx)),
Some(rbx_dom_weak::types::Variant::Float32(sy)),
) = (
decal.properties.get("OffsetStudsU"),
decal.properties.get("OffsetStudsV"),
decal.properties.get("StudsPerTileU"),
decal.properties.get("StudsPerTileV"),
)
{
let (size_u,size_v)=match normal_id{
0=>(size.z,size.y),//right
1=>(size.x,size.z),//top
2=>(size.x,size.y),//back
3=>(size.z,size.y),//left
4=>(size.x,size.z),//bottom
5=>(size.x,size.y),//front
_=>unreachable!(),
};
(
glam::vec4(decal_color3.r,decal_color3.g,decal_color3.b,1.0-*decal_transparency),
RobloxTextureTransform{
offset_u:*ox/(*sx),offset_v:*oy/(*sy),
scale_u:size_u/(*sx),scale_v:size_v/(*sy),
}
)
}else{
(glam::Vec4::ONE,RobloxTextureTransform::default())
}
}else{
(glam::Vec4::ONE,RobloxTextureTransform::default())
};
part_texture_description[normal_id as usize]=Some(RobloxFaceTextureDescription{
render:render_id,
color:roblox_texture_color,
transform:roblox_texture_transform,
});
}else{
println!("NormalId={} unsupported for shape={:?}",normal_id,primitive_shape);
}
}
}
}
//obscure rust syntax "slice pattern"
let [
f0,//Cube::Right
f1,//Cube::Top
f2,//Cube::Back
f3,//Cube::Left
f4,//Cube::Bottom
f5,//Cube::Front
]=part_texture_description;
let basepart_description=match primitive_shape{
primitives::Primitives::Sphere=>RobloxBasePartDescription::Sphere([f0,f1,f2,f3,f4,f5]),
primitives::Primitives::Cube=>RobloxBasePartDescription::Part([f0,f1,f2,f3,f4,f5]),
primitives::Primitives::Cylinder=>RobloxBasePartDescription::Cylinder([f0,f1,f2,f3,f4,f5]),
//use front face texture first and use top face texture as a fallback
primitives::Primitives::Wedge=>RobloxBasePartDescription::Wedge([
f0,//Cube::Right->Wedge::Right
if f5.is_some(){f5}else{f1},//Cube::Front|Cube::Top->Wedge::TopFront
f2,//Cube::Back->Wedge::Back
f3,//Cube::Left->Wedge::Left
f4,//Cube::Bottom->Wedge::Bottom
]),
//TODO: fix Left+Back texture coordinates to match roblox when not overridden by Top
primitives::Primitives::CornerWedge=>RobloxBasePartDescription::CornerWedge([
f0,//Cube::Right->CornerWedge::Right
if f2.is_some(){f2}else{f1.clone()},//Cube::Back|Cube::Top->CornerWedge::TopBack
if f3.is_some(){f3}else{f1},//Cube::Left|Cube::Top->CornerWedge::TopLeft
f4,//Cube::Bottom->CornerWedge::Bottom
f5,//Cube::Front->CornerWedge::Front
]),
};
//make new model if unit cube has not been created before
let mesh_id=if let Some(&mesh_id)=mesh_id_from_description.get(&basepart_description){
//push to existing texture model
mesh_id
}else{
let mesh_id=model::MeshId::new(primitive_meshes.len() as u32);
mesh_id_from_description.insert(basepart_description.clone(),mesh_id);//borrow checker going crazy
let mesh=match basepart_description{
RobloxBasePartDescription::Sphere(part_texture_description)
|RobloxBasePartDescription::Cylinder(part_texture_description)
|RobloxBasePartDescription::Part(part_texture_description)=>{
let mut cube_face_description=primitives::CubeFaceDescription::default();
for (face_id,roblox_face_description) in part_texture_description.iter().enumerate(){
cube_face_description.insert(
match face_id{
0=>primitives::CubeFace::Right,
1=>primitives::CubeFace::Top,
2=>primitives::CubeFace::Back,
3=>primitives::CubeFace::Left,
4=>primitives::CubeFace::Bottom,
5=>primitives::CubeFace::Front,
_=>unreachable!(),
},
match roblox_face_description{
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(),
None=>primitives::FaceDescription::new_with_render_id(textureless_render_group),
});
}
primitives::generate_partial_unit_cube(cube_face_description)
},
RobloxBasePartDescription::Wedge(wedge_texture_description)=>{
let mut wedge_face_description=primitives::WedgeFaceDescription::default();
for (face_id,roblox_face_description) in wedge_texture_description.iter().enumerate(){
wedge_face_description.insert(
match face_id{
0=>primitives::WedgeFace::Right,
1=>primitives::WedgeFace::TopFront,
2=>primitives::WedgeFace::Back,
3=>primitives::WedgeFace::Left,
4=>primitives::WedgeFace::Bottom,
_=>unreachable!(),
},
match roblox_face_description{
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(),
None=>primitives::FaceDescription::new_with_render_id(textureless_render_group),
});
}
primitives::generate_partial_unit_wedge(wedge_face_description)
},
RobloxBasePartDescription::CornerWedge(cornerwedge_texture_description)=>{
let mut cornerwedge_face_description=primitives::CornerWedgeFaceDescription::default();
for (face_id,roblox_face_description) in cornerwedge_texture_description.iter().enumerate(){
cornerwedge_face_description.insert(
match face_id{
0=>primitives::CornerWedgeFace::Right,
1=>primitives::CornerWedgeFace::TopBack,
2=>primitives::CornerWedgeFace::TopLeft,
3=>primitives::CornerWedgeFace::Bottom,
4=>primitives::CornerWedgeFace::Front,
_=>unreachable!(),
},
match roblox_face_description{
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(),
None=>primitives::FaceDescription::new_with_render_id(textureless_render_group),
});
}
primitives::generate_partial_unit_cornerwedge(cornerwedge_face_description)
},
};
primitive_meshes.push(mesh);
mesh_id
};
(MeshAvailability::Immediate,mesh_id)
},
Shape::MeshPart=>if let (
Some(rbx_dom_weak::types::Variant::Content(mesh_asset_id)),
Some(rbx_dom_weak::types::Variant::Content(texture_asset_id)),
)=(
object.properties.get("MeshId"),
object.properties.get("TextureID"),
){
(
MeshAvailability::Deferred(acquire_render_config_id(Some(texture_asset_id.as_ref()))),
acquire_mesh_id(mesh_asset_id.as_ref()),
)
}else{
panic!("Mesh has no Mesh or Texture");
},
};
let model_deferred_attributes=ModelDeferredAttributes{
mesh:mesh_id,
transform:model_transform,
color:glam::vec4(color3.r as f32/255f32, color3.g as f32/255f32, color3.b as f32/255f32, 1.0-*transparency),
deferred_attributes:GetAttributesArgs{
name:object.name.as_str().into(),
can_collide:*can_collide,
velocity:vec3::try_from_f32_array([velocity.x,velocity.y,velocity.z]).unwrap(),
},
};
match availability{
MeshAvailability::Immediate=>primitive_models_deferred_attributes.push(model_deferred_attributes),
MeshAvailability::Deferred(render)=>deferred_models_deferred_attributes.push(DeferredModelDeferredAttributes{
render,
model:model_deferred_attributes
}),
}
}
}
}
PartialMap1{
primitive_meshes,
primitive_models_deferred_attributes,
deferred_models_deferred_attributes,
}
}
struct MeshWithAabb{
mesh:model::Mesh,
aabb:strafesnet_common::aabb::Aabb,
}
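//stage 1 of the conversion pipeline: primitive meshes are already generated,
//while MeshPart meshes and collision attributes are still deferred.
//call add_meshpart_meshes_and_calculate_attributes to get a PartialMap2,
//then PartialMap2::add_render_configs_and_textures to build the CompleteMap.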
pub struct PartialMap1{
primitive_meshes:Vec<model::Mesh>,
primitive_models_deferred_attributes:Vec<ModelDeferredAttributes>,
deferred_models_deferred_attributes:Vec<DeferredModelDeferredAttributes>,
}
impl PartialMap1{
pub fn add_meshpart_meshes_and_calculate_attributes(
mut self,
meshpart_meshes:impl IntoIterator<Item=(model::MeshId,crate::data::RobloxMeshBytes)>,
)->PartialMap2{
//calculate attributes
let mut modes_builder=ModesBuilder::default();
let mut unique_attributes=Vec::new();
let mut attributes_id_from_attributes=HashMap::new();
let mut wormhole_in_model_to_id=HashMap::new();
let mut wormhole_id_to_out_model=HashMap::new();
//decode roblox meshes
//generate mesh_id_map based on meshes that failed to load
let loaded_meshes:HashMap<model::MeshId,MeshWithAabb>=
meshpart_meshes.into_iter().flat_map(|(old_mesh_id,roblox_mesh_bytes)|
match crate::mesh::convert(roblox_mesh_bytes){
Ok(mesh)=>{
let mut aabb=strafesnet_common::aabb::Aabb::default();
for &pos in &mesh.unique_pos{
aabb.grow(pos);
}
Some((old_mesh_id,MeshWithAabb{
mesh,
aabb,
}))
},
Err(e)=>{
println!("Error converting mesh: {e:?}");
None
},
}
).collect();
let mut mesh_id_from_render_config_id=HashMap::new();
//ignore meshes that fail to load completely for now
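//a loaded mesh is cloned once per (mesh,render config) pair, given a fresh MeshId
//and a graphics group pointing at that render config, then pushed into primitive_meshes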
let mut acquire_mesh_id_from_render_config_id=|old_mesh_id,render|{
loaded_meshes.get(&old_mesh_id).map(|mesh_with_aabb|(
*mesh_id_from_render_config_id.entry(old_mesh_id).or_insert_with(||HashMap::new())
.entry(render).or_insert_with(||{
let mesh_id=model::MeshId::new(self.primitive_meshes.len() as u32);
let mut mesh_clone=mesh_with_aabb.mesh.clone();
//add a render group lool
mesh_clone.graphics_groups.push(model::IndexedGraphicsGroup{
render,
//the lowest lod is highest quality
groups:vec![model::PolygonGroupId::new(0)]
});
self.primitive_meshes.push(mesh_clone);
mesh_id
}),
&mesh_with_aabb.aabb,
))
};
//now that the meshes are loaded, these models can be generated
let models_owned_attributes:Vec<ModelOwnedAttributes>=
self.deferred_models_deferred_attributes.into_iter().flat_map(|deferred_model_deferred_attributes|{
//meshes need to be cloned from loaded_meshes with a new id when they are used with a new render_id
//insert into primitive_meshes
let (mesh,aabb)=acquire_mesh_id_from_render_config_id(
deferred_model_deferred_attributes.model.mesh,
deferred_model_deferred_attributes.render
)?;
let size=aabb.size();
Some(ModelDeferredAttributes{
mesh,
deferred_attributes:deferred_model_deferred_attributes.model.deferred_attributes,
color:deferred_model_deferred_attributes.model.color,
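//rescale each basis column by 2/size so the mesh aabb maps onto the same unit space
//(presumably [-1,1], matching the generated unit primitives) that the part transform expects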
transform:Planar64Affine3::new(
Planar64Mat3::from_cols([
(deferred_model_deferred_attributes.model.transform.matrix3.x_axis*2/size.x).divide().fix_1(),
(deferred_model_deferred_attributes.model.transform.matrix3.y_axis*2/size.y).divide().fix_1(),
(deferred_model_deferred_attributes.model.transform.matrix3.z_axis*2/size.z).divide().fix_1()
]),
deferred_model_deferred_attributes.model.transform.translation
),
})
}).chain(self.primitive_models_deferred_attributes.into_iter())
.enumerate().map(|(model_id,model_deferred_attributes)|{
let model_id=model::ModelId::new(model_id as u32);
ModelOwnedAttributes{
mesh:model_deferred_attributes.mesh,
attributes:get_attributes(
&model_deferred_attributes.deferred_attributes.name,
model_deferred_attributes.deferred_attributes.can_collide,
model_deferred_attributes.deferred_attributes.velocity,
model_id,
&mut modes_builder,
&mut wormhole_in_model_to_id,
&mut wormhole_id_to_out_model,
),
color:model_deferred_attributes.color,
transform:model_deferred_attributes.transform,
}
}).collect();
let models=models_owned_attributes.into_iter().enumerate().map(|(model_id,mut model_owned_attributes)|{
//TODO: TAB
let model_id=model::ModelId::new(model_id as u32);
//update attributes with wormhole id
//TODO: errors/prints
if let Some(wormhole_id)=wormhole_in_model_to_id.get(&model_id){
if let Some(&wormhole_out_model_id)=wormhole_id_to_out_model.get(wormhole_id){
match &mut model_owned_attributes.attributes{
attr::CollisionAttributes::Contact(attr::ContactAttributes{contacting:_,general})
|attr::CollisionAttributes::Intersect(attr::IntersectAttributes{intersecting:_,general})
=>general.wormhole=Some(attr::Wormhole{destination_model:wormhole_out_model_id}),
attr::CollisionAttributes::Decoration=>println!("Not a wormhole"),
}
}
}
//index the attributes
let attributes_id=if let Some(&attributes_id)=attributes_id_from_attributes.get(&model_owned_attributes.attributes){
attributes_id
}else{
let attributes_id=attr::CollisionAttributesId::new(unique_attributes.len() as u32);
attributes_id_from_attributes.insert(model_owned_attributes.attributes.clone(),attributes_id);
unique_attributes.push(model_owned_attributes.attributes);
attributes_id
};
model::Model{
mesh:model_owned_attributes.mesh,
transform:model_owned_attributes.transform,
color:model_owned_attributes.color,
attributes:attributes_id,
}
}).collect();
PartialMap2{
meshes:self.primitive_meshes,
models,
modes:modes_builder.build(),
attributes:unique_attributes,
}
}
}
pub struct PartialMap2{
meshes:Vec<model::Mesh>,
models:Vec<model::Model>,
modes:gameplay_modes::Modes,
attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
}
impl PartialMap2{
pub fn add_render_configs_and_textures(
self,
render_configs:impl IntoIterator<Item=(model::RenderConfigId,model::RenderConfig)>,
textures:impl IntoIterator<Item=(model::TextureId,Vec<u8>)>,
)->map::CompleteMap{
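//textures are re-indexed densely in iteration order; each render config's texture id
//is rewritten to the new TextureId (or dropped if that texture was never provided)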
let (textures,texture_id_map):(Vec<Vec<u8>>,HashMap<model::TextureId,model::TextureId>)
=textures.into_iter().enumerate().map(|(new_texture_id,(old_texture_id,texture))|{
(texture,(old_texture_id,model::TextureId::new(new_texture_id as u32)))
}).unzip();
let render_configs=render_configs.into_iter().map(|(render_config_id,mut render_config)|{
//this may generate duplicate no-texture render configs but idc
render_config.texture=render_config.texture.and_then(|texture_id|
texture_id_map.get(&texture_id).copied()
);
render_config
}).collect();
map::CompleteMap{
modes:self.modes,
attributes:self.attributes,
meshes:self.meshes,
models:self.models,
//the roblox legacy texture thing always works
textures,
render_configs,
}
}
}

View File

@ -1 +0,0 @@
/target

View File

@ -1,21 +0,0 @@
[package]
name = "roblox_emulator"
version = "0.4.7"
edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Run embedded Luau scripts which manipulate the DOM."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]

[features]
default=["run-service"]
run-service=[]

[dependencies]
glam = "0.29.0"
mlua = { version = "0.10.1", features = ["luau"] }
phf = { version = "0.11.2", features = ["macros"] }
rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" }
rbx_reflection = { version = "4.7.0", registry = "strafesnet" }
rbx_reflection_database = { version = "0.2.10", registry = "strafesnet" }
rbx_types = { version = "1.10.0", registry = "strafesnet" }

Some files were not shown because too many files have changed in this diff.