Compare commits

51 Commits

Author SHA1 Message Date
f98ffe6e0b fix spin bug edge case 2025-01-15 00:48:03 -08:00
f9d4ca8370 remove single use function 2025-01-15 00:36:15 -08:00
fea5bf4398 low polling rate edge case 2025-01-15 00:29:45 -08:00
d393f9f187 change some function names 2025-01-15 00:26:40 -08:00
ada34237c9 fix timeout timestamp 2025-01-15 00:02:26 -08:00
292df72709 transpose next buffer state calculation 2025-01-14 23:49:31 -08:00
7476d7cdc7 the mouse spin bug 2025-01-14 23:49:31 -08:00
6138d70a6f unify timeout 😍 2025-01-14 23:49:31 -08:00
cac4d698c3 fix overall correctness 2025-01-14 23:23:50 -08:00
e0ea0d6119 remove holdover case 2025-01-14 23:00:46 -08:00
80a4431ee8 test mouse_interpolator 2025-01-14 23:00:46 -08:00
80424cf24c spawn on map change 2025-01-14 21:41:55 -08:00
c338826513 finish 2025-01-14 21:16:36 -08:00
a6a242175b rewrite enums again 2025-01-14 20:44:17 -08:00
08bd57ffe1 remove incorrect comment 2025-01-14 19:02:34 -08:00
0d9c6648e2 accumulate mouse_pos as float 2025-01-14 19:00:17 -08:00
405cba3549 discover ternary method on bool 2025-01-14 18:39:38 -08:00
38d8dc1302 InstructionCache 2025-01-14 18:26:59 -08:00
33ccefc411 pop_buffered_instruction can be accomplished with mem::replace 2025-01-14 03:44:23 -08:00
93277c042b make pain code smaller 2025-01-14 01:45:09 -08:00
90f6437817 wrong instruction 2025-01-14 01:36:50 -08:00
29f9d5298f work 2025-01-14 01:34:26 -08:00
b0489a3746 work work work 2025-01-13 23:59:16 -08:00
a8847d3632 ruin physics code 2025-01-13 23:55:42 -08:00
fb8c2a619a rename fields in MouseInstruction::ReplaceMouse 2025-01-13 23:51:13 -08:00
6898302fa5 move code to more relevant location 2025-01-13 23:23:58 -08:00
52bbaaddc7 don't mutate physics_timeline on the fly 2025-01-13 23:23:58 -08:00
a8581a2a4f don't reconstruct MouseState struct with noop 2025-01-13 23:23:58 -08:00
c6ff11dd3e use replace_with to replace the enum variant in-place without cloning 2025-01-13 23:11:43 -08:00
844c7a08e1 add replace_with dep 2025-01-13 22:46:16 -08:00
bd61d03c91 work 2025-01-13 22:32:26 -08:00
b58ebb2775 todos 2025-01-11 01:50:06 -08:00
9095215cad write pop_buffered_instruction 2025-01-11 01:38:45 -08:00
92c30c3b87 cool changes 2025-01-11 01:07:06 -08:00
1b35c96f6e cook a bit 2025-01-10 23:18:53 -08:00
47bf9f1af3 pain 2025-01-10 22:08:54 -08:00
719c702b95 actually need ReplaceMouse because of OS level issue
The operating system does not report the timestamp at which it checks that the mouse was not moving, so the mouse interpolation will necessarily be incorrect for up to 1 polling period.  The alternative is to guess / make up a timestamp, but I don't want to do this.
2025-01-10 22:01:02 -08:00
ceb2499ad2 delete ReplaceMouse instruction 2025-01-10 20:59:25 -08:00
fe43ce9df6 progress 2025-01-10 20:03:53 -08:00
1fcd18bc45 how does it work 2025-01-09 21:20:25 -08:00
e371f95a4b a 2025-01-09 21:14:17 -08:00
b02c1bc7b4 idk if dropinstruction is gonan work 2025-01-09 21:14:15 -08:00
89446a933a a 2025-01-09 20:48:11 -08:00
0a3d965bb6 work 2025-01-09 20:48:11 -08:00
b6206d52c8 work 2025-01-09 20:48:11 -08:00
498c628280 asd 2025-01-09 20:48:11 -08:00
273e915f67 no 2025-01-09 20:48:11 -08:00
5072e5d7a8 yeah 2025-01-09 20:48:11 -08:00
3f0e3e0d3c update mouse interpolator code 2025-01-09 20:48:11 -08:00
2e88ae0612 wip 2025-01-09 20:48:11 -08:00
4c216a5b28 wip 2025-01-09 20:48:11 -08:00
128 changed files with 3179 additions and 9167 deletions
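
Commit 719c702b95 above explains why the ReplaceMouse instruction is needed: the OS reports only that the mouse has stopped moving, not when, so any interpolated mouse position can be wrong for up to one polling period. A minimal sketch of that error bound (hypothetical names, not code from this repository):

```rust
/// Hypothetical sketch of the timing argument from commit 719c702b95:
/// the OS says "the mouse is no longer moving" without a timestamp, so the
/// true stop time is only known to lie within one polling period.
struct MouseStop {
    /// timestamp of the last reported mouse movement, in nanoseconds
    last_move_ns: u64,
    /// mouse polling period in nanoseconds (1_000_000 for a 1000 Hz mouse)
    polling_period_ns: u64,
}

impl MouseStop {
    /// The real stop time lies in (last_move_ns, last_move_ns + polling_period_ns],
    /// so taking the upper bound instead of inventing a timestamp keeps the
    /// interpolation error at or below one polling period.
    fn assumed_stop_ns(&self) -> u64 {
        self.last_move_ns + self.polling_period_ns
    }
}

fn main() {
    let stop = MouseStop { last_move_ns: 5_000_000, polling_period_ns: 1_000_000 };
    assert_eq!(stop.assumed_stop_ns(), 6_000_000); // off by at most 1 ms from the true stop
}
```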

@@ -1,2 +1,6 @@
 [registries.strafesnet]
 index = "sparse+https://git.itzana.me/api/packages/strafesnet/cargo/"
+
+[target.x86_64-unknown-linux-gnu]
+linker = "clang"
+rustflags = ["-C", "link-arg=-fuse-ld=/usr/bin/mold"]

Cargo.lock (generated): 2552 changes

File diff suppressed because it is too large

@@ -1,10 +1,5 @@
 [workspace]
 members = [
-"engine/graphics",
-"engine/physics",
-"engine/session",
-"engine/settings",
-"integration-testing",
 "lib/bsp_loader",
 "lib/common",
 "lib/deferred_loader",
@@ -12,10 +7,8 @@ members = [
 "lib/linear_ops",
 "lib/ratio_ops",
 "lib/rbx_loader",
-"lib/rbxassetid",
 "lib/roblox_emulator",
 "lib/snf",
-"map-tool",
 "strafe-client",
 ]
 resolver = "2"
@@ -24,7 +17,3 @@ resolver = "2"
 #lto = true
 strip = true
 codegen-units = 1
-[profile.dev]
-strip = false
-opt-level = 3

@@ -3,9 +3,6 @@
 # Strafe Project
 Monorepo for working on projects related to strafe client.
-## Try it out
-See [releases](https://git.itzana.me/StrafesNET/strafe-project/releases) for downloads.
 ## How to build and run
 1. Have rust and git installed
 2. `git clone https://git.itzana.me/StrafesNET/strafe-project`
@@ -13,4 +10,4 @@ See [releases](https://git.itzana.me/StrafesNET/strafe-project/releases) for dow
 4. `cargo run --release --bin strafe-client`
 ## Licenses
-Each project has its own license. Most crates are MIT/Apache but notably the Strafe Client and engine crates have a sole proprietor license.
+Each project has its own license. Most crates are MIT/Apache but notably the Strafe Client has a sole proprietor license.

@@ -1,14 +0,0 @@
[package]
name = "strafesnet_graphics"
version = "0.1.0"
edition = "2024"
[dependencies]
bytemuck = { version = "1.13.1", features = ["derive"] }
ddsfile = "0.5.1"
glam = "0.30.0"
id = { version = "0.1.0", registry = "strafesnet" }
strafesnet_common = { path = "../../lib/common", registry = "strafesnet" }
strafesnet_session = { path = "../session", registry = "strafesnet" }
strafesnet_settings = { path = "../settings", registry = "strafesnet" }
wgpu = "24.0.0"

@@ -1,8 +0,0 @@
/*******************************************************
* Copyright (C) 2023-2024 Rhys Lloyd <krakow20@gmail.com>
*
* This file is part of the StrafesNET bhop/surf client.
*
* StrafesNET can not be copied and/or distributed
* without the express permission of Rhys Lloyd
*******************************************************/

@@ -1,2 +0,0 @@
pub mod model;
pub mod graphics;

@@ -1,10 +0,0 @@
[package]
name = "strafesnet_physics"
version = "0.1.0"
edition = "2024"
[dependencies]
arrayvec = "0.7.6"
glam = "0.30.0"
id = { version = "0.1.0", registry = "strafesnet" }
strafesnet_common = { path = "../../lib/common", registry = "strafesnet" }

@@ -1,8 +0,0 @@
/*******************************************************
* Copyright (C) 2023-2024 Rhys Lloyd <krakow20@gmail.com>
*
* This file is part of the StrafesNET bhop/surf client.
*
* StrafesNET can not be copied and/or distributed
* without the express permission of Rhys Lloyd
*******************************************************/

@@ -1,45 +0,0 @@
mod body;
mod push_solve;
mod face_crawler;
mod model;
pub mod physics;
// Physics bug fixes can easily desync all bots.
//
// When replaying a bot, use the exact physics version which it was recorded with.
//
// When validating a new bot, ignore the version and use the latest version,
// and overwrite the version in the file.
//
// Compatible physics versions should be determined
// empirically at development time via leaderboard resimulation.
//
// Compatible physics versions should result in an identical leaderboard state,
// or the only bots which fail are ones exploiting a surgically patched bug.
#[derive(Clone,Copy,Hash,Debug,id::Id,Eq,PartialEq,Ord,PartialOrd)]
pub struct PhysicsVersion(u32);
pub const VERSION:PhysicsVersion=PhysicsVersion(0);
const LATEST_COMPATIBLE_VERSION:[u32;1+VERSION.0 as usize]=const{
let compat=[0];
let mut input_version=0;
while input_version<compat.len(){
// compatible version must be greater than or equal to the input version
assert!(input_version as u32<=compat[input_version]);
// compatible version must be a version that exists
assert!(compat[input_version]<=VERSION.0);
input_version+=1;
}
compat
};
pub enum PhysicsVersionError{
UnknownPhysicsVersion,
}
pub const fn get_latest_compatible_version(PhysicsVersion(version):PhysicsVersion)->Result<PhysicsVersion,PhysicsVersionError>{
if (version as usize)<LATEST_COMPATIBLE_VERSION.len(){
Ok(PhysicsVersion(LATEST_COMPATIBLE_VERSION[version as usize]))
}else{
Err(PhysicsVersionError::UnknownPhysicsVersion)
}
}

@@ -1,12 +0,0 @@
[package]
name = "strafesnet_session"
version = "0.1.0"
edition = "2024"
[dependencies]
glam = "0.30.0"
replace_with = "0.1.7"
strafesnet_common = { path = "../../lib/common", registry = "strafesnet" }
strafesnet_physics = { path = "../physics", registry = "strafesnet" }
strafesnet_settings = { path = "../settings", registry = "strafesnet" }
strafesnet_snf = { path = "../../lib/snf", registry = "strafesnet" }

@@ -1,8 +0,0 @@
/*******************************************************
* Copyright (C) 2023-2024 Rhys Lloyd <krakow20@gmail.com>
*
* This file is part of the StrafesNET bhop/surf client.
*
* StrafesNET can not be copied and/or distributed
* without the express permission of Rhys Lloyd
*******************************************************/

@@ -1,2 +0,0 @@
mod mouse_interpolator;
pub mod session;

@@ -1,443 +0,0 @@
use std::collections::HashMap;
use strafesnet_common::gameplay_modes::{ModeId,StageId};
use strafesnet_common::instruction::{InstructionConsumer,InstructionEmitter,InstructionFeedback,TimedInstruction};
// session represents the non-hardware state of the client.
// Ideally it is a deterministic state which is atomically updated by instructions, same as the simulation state.
use strafesnet_common::physics::{
ModeInstruction,MiscInstruction,
Instruction as PhysicsInputInstruction,
TimeInner as PhysicsTimeInner,
Time as PhysicsTime
};
use strafesnet_common::timer::{Scaled,Timer};
use strafesnet_common::session::{TimeInner as SessionTimeInner,Time as SessionTime};
use strafesnet_settings::directories::Directories;
use crate::mouse_interpolator::{MouseInterpolator,StepInstruction,Instruction as MouseInterpolatorInstruction};
use strafesnet_physics::physics::{self,PhysicsContext,PhysicsData};
use strafesnet_settings::settings::UserSettings;
pub enum Instruction<'a>{
Input(SessionInputInstruction),
Control(SessionControlInstruction),
Playback(SessionPlaybackInstruction),
ChangeMap(&'a strafesnet_common::map::CompleteMap),
LoadReplay(strafesnet_snf::bot::Segment),
Idle,
}
pub enum SessionInputInstruction{
Mouse(glam::IVec2),
SetControl(strafesnet_common::physics::SetControlInstruction),
Mode(ImplicitModeInstruction),
Misc(strafesnet_common::physics::MiscInstruction),
}
/// Implicit mode instruction are fed separately to session.
/// Session generates the explicit mode instructions interlaced with a SetSensitivity instruction
#[derive(Clone,Debug)]
pub enum ImplicitModeInstruction{
ResetAndRestart,
ResetAndSpawn(ModeId,StageId),
}
pub enum SessionControlInstruction{
SetPaused(bool),
// copy the current session simulation recording into a replay and view it
CopyRecordingIntoReplayAndSpectate,
StopSpectate,
SaveReplay,
LoadIntoReplayState,
}
pub enum SessionPlaybackInstruction{
SkipForward,
SkipBack,
TogglePaused,
DecreaseTimescale,
IncreaseTimescale,
}
pub struct FrameState{
pub body:physics::Body,
pub camera:physics::PhysicsCamera,
pub time:PhysicsTime,
}
pub struct Simulation{
timer:Timer<Scaled<SessionTimeInner,PhysicsTimeInner>>,
physics:physics::PhysicsState,
}
impl Simulation{
pub const fn new(
timer:Timer<Scaled<SessionTimeInner,PhysicsTimeInner>>,
physics:physics::PhysicsState,
)->Self{
Self{
timer,
physics,
}
}
pub fn get_frame_state(&self,time:SessionTime)->FrameState{
FrameState{
body:self.physics.camera_body(),
camera:self.physics.camera(),
time:self.timer.time(time),
}
}
}
#[derive(Default)]
pub struct Recording{
instructions:Vec<TimedInstruction<PhysicsInputInstruction,PhysicsTime>>,
}
impl Recording{
pub fn new(
instructions:Vec<TimedInstruction<PhysicsInputInstruction,PhysicsTime>>,
)->Self{
Self{instructions}
}
fn clear(&mut self){
self.instructions.clear();
}
}
pub struct Replay{
next_instruction_id:usize,
recording:Recording,
simulation:Simulation,
}
impl Replay{
pub const fn new(
recording:Recording,
simulation:Simulation,
)->Self{
Self{
next_instruction_id:0,
recording,
simulation,
}
}
pub fn advance(&mut self,physics_data:&PhysicsData,time_limit:SessionTime){
let mut time=self.simulation.timer.time(time_limit);
loop{
if let Some(ins)=self.recording.instructions.get(self.next_instruction_id){
if ins.time<time{
PhysicsContext::run_input_instruction(&mut self.simulation.physics,physics_data,ins.clone());
self.next_instruction_id+=1;
}else{
break;
}
}else{
// loop playback
self.next_instruction_id=0;
// No need to reset physics because the very first instruction is 'Reset'
let new_time=self.recording.instructions.first().map_or(PhysicsTime::ZERO,|ins|ins.time);
self.simulation.timer.set_time(time_limit,new_time);
time=new_time;
}
}
}
}
#[derive(Clone,Copy,Hash,PartialEq,Eq)]
struct BotId(u32);
//#[derive(Clone,Copy,Hash,PartialEq,Eq)]
//struct PlayerId(u32);
enum ViewState{
Play,
//Spectate(PlayerId),
Replay(BotId),
}
pub struct Session{
directories:Directories,
user_settings:UserSettings,
mouse_interpolator:crate::mouse_interpolator::MouseInterpolator,
view_state:ViewState,
//gui:GuiState
geometry_shared:physics::PhysicsData,
simulation:Simulation,
// below fields not included in lite session
recording:Recording,
//players:HashMap<PlayerId,Simulation>,
replays:HashMap<BotId,Replay>,
}
impl Session{
pub fn new(
user_settings:UserSettings,
directories:Directories,
simulation:Simulation,
)->Self{
Self{
user_settings,
directories,
mouse_interpolator:MouseInterpolator::new(),
geometry_shared:Default::default(),
simulation,
view_state:ViewState::Play,
recording:Default::default(),
replays:HashMap::new(),
}
}
fn clear_recording(&mut self){
self.recording.clear();
}
fn change_map(&mut self,map:&strafesnet_common::map::CompleteMap){
self.simulation.physics.clear();
self.geometry_shared.generate_models(map);
}
pub fn get_frame_state(&self,time:SessionTime)->Option<FrameState>{
match &self.view_state{
ViewState::Play=>Some(self.simulation.get_frame_state(time)),
ViewState::Replay(bot_id)=>self.replays.get(bot_id).map(|replay|
replay.simulation.get_frame_state(time)
),
}
}
pub fn user_settings(&self)->&UserSettings{
&self.user_settings
}
}
// mouseinterpolator consumes RawInputInstruction
// mouseinterpolator emits PhysicsInputInstruction
// mouseinterpolator consumes DoStep to move on to the next emitted instruction
// Session comsumes SessionInstruction -> forwards RawInputInstruction to mouseinterpolator
// Session consumes DoStep -> forwards DoStep to mouseinterpolator
// Session emits DoStep
impl InstructionConsumer<Instruction<'_>> for Session{
type Time=SessionTime;
fn process_instruction(&mut self,ins:TimedInstruction<Instruction,Self::Time>){
// repetitive procedure macro
macro_rules! run_mouse_interpolator_instruction{
($instruction:expr)=>{
self.mouse_interpolator.process_instruction(TimedInstruction{
time:ins.time,
instruction:TimedInstruction{
time:self.simulation.timer.time(ins.time),
instruction:$instruction,
},
});
};
}
// process any timeouts that occured since the last instruction
self.process_exhaustive(ins.time);
match ins.instruction{
// send it down to MouseInterpolator with two timestamps, SessionTime and PhysicsTime
Instruction::Input(SessionInputInstruction::Mouse(pos))=>{
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::MoveMouse(pos));
},
Instruction::Input(SessionInputInstruction::SetControl(set_control_instruction))=>{
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::SetControl(set_control_instruction));
},
Instruction::Input(SessionInputInstruction::Mode(ImplicitModeInstruction::ResetAndRestart))=>{
self.clear_recording();
let mode_id=self.simulation.physics.mode();
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Mode(ModeInstruction::Reset));
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Misc(MiscInstruction::SetSensitivity(self.user_settings().calculate_sensitivity())));
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Mode(ModeInstruction::Restart(mode_id)));
},
Instruction::Input(SessionInputInstruction::Mode(ImplicitModeInstruction::ResetAndSpawn(mode_id,spawn_id)))=>{
self.clear_recording();
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Mode(ModeInstruction::Reset));
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Misc(MiscInstruction::SetSensitivity(self.user_settings().calculate_sensitivity())));
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Mode(ModeInstruction::Spawn(mode_id,spawn_id)));
},
Instruction::Input(SessionInputInstruction::Misc(misc_instruction))=>{
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Misc(misc_instruction));
},
Instruction::Control(SessionControlInstruction::SetPaused(paused))=>{
// don't flush the buffered instructions in the mouse interpolator
// until the mouse is confirmed to be not moving at a later time
// what if they pause for 5ms lmao
_=self.simulation.timer.set_paused(ins.time,paused);
},
Instruction::Control(SessionControlInstruction::CopyRecordingIntoReplayAndSpectate)=> if let ViewState::Play=self.view_state{
// Bind: B
// pause simulation
_=self.simulation.timer.set_paused(ins.time,true);
// create recording
let mut recording=Recording::default();
recording.instructions.extend(self.recording.instructions.iter().cloned());
// create timer starting at first instruction (or zero if the list is empty)
let new_time=recording.instructions.first().map_or(PhysicsTime::ZERO,|ins|ins.time);
let timer=Timer::unpaused(ins.time,new_time);
// create default physics state
let simulation=Simulation::new(timer,Default::default());
// invent a new bot id and insert the replay
let bot_id=BotId(self.replays.len() as u32);
self.replays.insert(bot_id,Replay::new(
recording,
simulation,
));
// begin spectate
self.view_state=ViewState::Replay(bot_id);
},
Instruction::Control(SessionControlInstruction::StopSpectate)=>{
let view_state=core::mem::replace(&mut self.view_state,ViewState::Play);
// delete the bot, otherwise it's inaccessible and wastes CPU
match view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>{
self.replays.remove(&bot_id);
},
}
_=self.simulation.timer.set_paused(ins.time,false);
},
Instruction::Control(SessionControlInstruction::SaveReplay)=>{
// Bind: N
let view_state=core::mem::replace(&mut self.view_state,ViewState::Play);
match view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.remove(&bot_id){
let mut replays_path=self.directories.replays.clone();
let file_name=format!("{}.snfb",ins.time);
std::thread::spawn(move ||{
std::fs::create_dir_all(replays_path.as_path()).unwrap();
replays_path.push(file_name);
let file=std::fs::File::create(replays_path).unwrap();
strafesnet_snf::bot::write_bot(
std::io::BufWriter::new(file),
strafesnet_physics::VERSION.get(),
replay.recording.instructions
).unwrap();
println!("Finished writing bot file!");
});
},
}
_=self.simulation.timer.set_paused(ins.time,false);
},
Instruction::Control(SessionControlInstruction::LoadIntoReplayState)=>{
// Bind: J
let view_state=core::mem::replace(&mut self.view_state,ViewState::Play);
match view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.remove(&bot_id){
self.recording.instructions=replay.recording.instructions.into_iter().take(replay.next_instruction_id).collect();
self.simulation=replay.simulation;
},
}
// don't unpause -- use the replay timer state whether it is pasued or unpaused
},
Instruction::Playback(SessionPlaybackInstruction::IncreaseTimescale)=>{
match &self.view_state{
ViewState::Play=>{
// allow simulation timescale for fun
let scale=self.simulation.timer.get_scale();
self.simulation.timer.set_scale(ins.time,strafesnet_common::integer::Ratio64::new(scale.num()*5,scale.den()*4).unwrap());
},
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.get_mut(bot_id){
let scale=replay.simulation.timer.get_scale();
replay.simulation.timer.set_scale(ins.time,strafesnet_common::integer::Ratio64::new(scale.num()*5,scale.den()*4).unwrap());
},
}
},
Instruction::Playback(SessionPlaybackInstruction::DecreaseTimescale)=>{
match &self.view_state{
ViewState::Play=>{
// allow simulation timescale for fun
let scale=self.simulation.timer.get_scale();
self.simulation.timer.set_scale(ins.time,strafesnet_common::integer::Ratio64::new(scale.num()*4,scale.den()*5).unwrap());
},
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.get_mut(bot_id){
let scale=replay.simulation.timer.get_scale();
replay.simulation.timer.set_scale(ins.time,strafesnet_common::integer::Ratio64::new(scale.num()*4,scale.den()*5).unwrap());
},
}
},
Instruction::Playback(SessionPlaybackInstruction::SkipForward)=>{
match &self.view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.get_mut(bot_id){
let time=replay.simulation.timer.time(ins.time+SessionTime::from_secs(5));
replay.simulation.timer.set_time(ins.time,time);
},
}
},
Instruction::Playback(SessionPlaybackInstruction::SkipBack)=>{
match &self.view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.get_mut(bot_id){
let time=replay.simulation.timer.time(ins.time+SessionTime::from_secs(5));
replay.simulation.timer.set_time(ins.time,time);
// resimulate the entire playback lol
replay.next_instruction_id=0;
},
}
},
Instruction::Playback(SessionPlaybackInstruction::TogglePaused)=>{
match &self.view_state{
ViewState::Play=>(),
ViewState::Replay(bot_id)=>if let Some(replay)=self.replays.get_mut(bot_id){
_=replay.simulation.timer.set_paused(ins.time,!replay.simulation.timer.is_paused());
},
}
}
Instruction::ChangeMap(complete_map)=>{
self.clear_recording();
self.change_map(complete_map);
},
Instruction::LoadReplay(bot)=>{
// pause simulation
_=self.simulation.timer.set_paused(ins.time,true);
// create recording
let recording=Recording::new(bot.instructions);
// create timer starting at first instruction (or zero if the list is empty)
let new_time=recording.instructions.first().map_or(PhysicsTime::ZERO,|ins|ins.time);
let timer=Timer::unpaused(ins.time,new_time);
// create default physics state
let simulation=Simulation::new(timer,Default::default());
// invent a new bot id and insert the replay
let bot_id=BotId(self.replays.len() as u32);
self.replays.insert(bot_id,Replay::new(
recording,
simulation,
));
// begin spectate
self.view_state=ViewState::Replay(bot_id);
},
Instruction::Idle=>{
run_mouse_interpolator_instruction!(MouseInterpolatorInstruction::Idle);
// this just refreshes the replays
for replay in self.replays.values_mut(){
// TODO: filter idles from recording, inject new idles in real time
replay.advance(&self.geometry_shared,ins.time);
}
}
};
// process all emitted output instructions
self.process_exhaustive(ins.time);
}
}
impl InstructionConsumer<StepInstruction> for Session{
type Time=SessionTime;
fn process_instruction(&mut self,ins:TimedInstruction<StepInstruction,Self::Time>){
let time=self.simulation.timer.time(ins.time);
if let Some(instruction)=self.mouse_interpolator.pop_buffered_instruction(ins.set_time(time)){
//record
self.recording.instructions.push(instruction.clone());
PhysicsContext::run_input_instruction(&mut self.simulation.physics,&self.geometry_shared,instruction);
}
}
}
impl InstructionEmitter<StepInstruction> for Session{
type Time=SessionTime;
fn next_instruction(&self,time_limit:SessionTime)->Option<TimedInstruction<StepInstruction,Self::Time>>{
self.mouse_interpolator.next_instruction(time_limit)
}
}

@@ -1,10 +0,0 @@
[package]
name = "strafesnet_settings"
version = "0.1.0"
edition = "2024"
[dependencies]
configparser = "3.0.2"
directories = "6.0.0"
glam = "0.30.0"
strafesnet_common = { path = "../../lib/common", registry = "strafesnet" }

@@ -1,8 +0,0 @@
/*******************************************************
* Copyright (C) 2023-2024 Rhys Lloyd <krakow20@gmail.com>
*
* This file is part of the StrafesNET bhop/surf client.
*
* StrafesNET can not be copied and/or distributed
* without the express permission of Rhys Lloyd
*******************************************************/

@@ -1,32 +0,0 @@
use std::path::PathBuf;
use crate::settings::{UserSettings,load_user_settings};
pub struct Directories{
pub settings:PathBuf,
pub maps:PathBuf,
pub replays:PathBuf,
}
impl Directories{
pub fn settings(&self)->UserSettings{
load_user_settings(&self.settings)
}
pub fn user()->Option<Self>{
let dirs=directories::ProjectDirs::from("net.strafes","StrafesNET","Strafe Client")?;
Some(Self{
settings:dirs.config_dir().join("settings.conf"),
maps:dirs.cache_dir().join("maps"),
// separate directory for remote downloaded replays (cache)
// bots:dirs.cache_dir().join("bots"),
replays:dirs.data_local_dir().join("replays"),
})
}
pub fn portable()->Result<Self,std::io::Error>{
let current_dir=std::env::current_dir()?;
Ok(Self{
settings:current_dir.join("settings.conf"),
maps:current_dir.join("maps"),
replays:current_dir.join("replays"),
})
}
}

@@ -1,2 +0,0 @@
pub mod settings;
pub mod directories;

@@ -1,9 +0,0 @@
[package]
name = "integration-testing"
version = "0.1.0"
edition = "2024"
[dependencies]
strafesnet_common = { path = "../lib/common", registry = "strafesnet" }
strafesnet_physics = { path = "../engine/physics", registry = "strafesnet" }
strafesnet_snf = { path = "../lib/snf", registry = "strafesnet" }

@@ -1,253 +0,0 @@
use std::io::Cursor;
use std::path::Path;
use std::time::Instant;
use strafesnet_physics::physics::{PhysicsData,PhysicsState,PhysicsContext};
fn main(){
let arg=std::env::args().skip(1).next();
match arg.as_deref(){
Some("determinism")|None=>test_determinism().unwrap(),
Some("replay")=>run_replay().unwrap(),
_=>println!("invalid argument"),
}
}
#[allow(unused)]
#[derive(Debug)]
enum ReplayError{
IO(std::io::Error),
SNF(strafesnet_snf::Error),
SNFM(strafesnet_snf::map::Error),
SNFB(strafesnet_snf::bot::Error),
}
impl From<std::io::Error> for ReplayError{
fn from(value:std::io::Error)->Self{
Self::IO(value)
}
}
impl From<strafesnet_snf::Error> for ReplayError{
fn from(value:strafesnet_snf::Error)->Self{
Self::SNF(value)
}
}
impl From<strafesnet_snf::map::Error> for ReplayError{
fn from(value:strafesnet_snf::map::Error)->Self{
Self::SNFM(value)
}
}
impl From<strafesnet_snf::bot::Error> for ReplayError{
fn from(value:strafesnet_snf::bot::Error)->Self{
Self::SNFB(value)
}
}
fn read_entire_file(path:impl AsRef<Path>)->Result<Cursor<Vec<u8>>,std::io::Error>{
let data=std::fs::read(path)?;
Ok(Cursor::new(data))
}
fn run_replay()->Result<(),ReplayError>{
println!("loading map file..");
let data=read_entire_file("../tools/bhop_maps/5692113331.snfm")?;
let map=strafesnet_snf::read_map(data)?.into_complete_map()?;
println!("loading bot file..");
let data=read_entire_file("../tools/replays/535s+159764769ns.snfb")?;
let bot=strafesnet_snf::read_bot(data)?.read_all()?;
// create recording
let mut physics_data=PhysicsData::default();
println!("generating models..");
physics_data.generate_models(&map);
println!("simulating...");
let mut physics=PhysicsState::default();
for ins in bot.instructions{
PhysicsContext::run_input_instruction(&mut physics,&physics_data,ins);
}
match physics.get_finish_time(){
Some(time)=>println!("finish time:{}",time),
None=>println!("simulation did not end in finished state"),
}
Ok(())
}
enum DeterminismResult{
Deterministic,
NonDeterministic,
}
struct SegmentResult{
determinism:DeterminismResult,
ticks:u64,
nanoseconds:u64,
}
fn segment_determinism(bot:strafesnet_snf::bot::Segment,physics_data:&PhysicsData)->SegmentResult{
// create default physics state
let mut physics_deterministic=PhysicsState::default();
// create a second physics state
let mut physics_filtered=PhysicsState::default();
// invent a new bot id and insert the replay
println!("simulating...");
let mut non_idle_count=0;
let instruction_count=bot.instructions.len();
let start=Instant::now();
for (i,ins) in bot.instructions.into_iter().enumerate(){
let state_deterministic=physics_deterministic.clone();
let state_filtered=physics_filtered.clone();
PhysicsContext::run_input_instruction(&mut physics_deterministic,&physics_data,ins.clone());
match ins{
strafesnet_common::instruction::TimedInstruction{instruction:strafesnet_common::physics::Instruction::Idle,..}=>(),
other=>{
non_idle_count+=1;
// run
PhysicsContext::run_input_instruction(&mut physics_filtered,&physics_data,other.clone());
// check if position matches
let b0=physics_deterministic.camera_body();
let b1=physics_filtered.camera_body();
if b0.position!=b1.position{
let nanoseconds=start.elapsed().as_nanos() as u64;
println!("desync at instruction #{}",i);
println!("non idle instructions completed={non_idle_count}");
println!("instruction #{i}={:?}",other);
println!("deterministic state0:\n{state_deterministic:?}");
println!("filtered state0:\n{state_filtered:?}");
println!("deterministic state1:\n{:?}",physics_deterministic);
println!("filtered state1:\n{:?}",physics_filtered);
return SegmentResult{
determinism:DeterminismResult::NonDeterministic,
ticks:1+i as u64+non_idle_count,
nanoseconds,
};
}
},
}
}
let nanoseconds=start.elapsed().as_nanos() as u64;
match physics_deterministic.get_finish_time(){
Some(time)=>println!("[with idle] finish time:{}",time),
None=>println!("[with idle] simulation did not end in finished state"),
}
match physics_filtered.get_finish_time(){
Some(time)=>println!("[filtered] finish time:{}",time),
None=>println!("[filtered] simulation did not end in finished state"),
}
SegmentResult{
determinism:DeterminismResult::Deterministic,
ticks:instruction_count as u64+non_idle_count,
nanoseconds,
}
}
type ThreadResult=Result<Option<SegmentResult>,ReplayError>;
fn read_and_run(file_path:std::path::PathBuf,physics_data:&PhysicsData)->ThreadResult{
let data=read_entire_file(file_path.as_path())?;
let bot=strafesnet_snf::read_bot(data)?.read_all()?;
println!("Running {:?}",file_path.file_stem());
Ok(Some(segment_determinism(bot,physics_data)))
}
fn do_thread<'a>(s:&'a std::thread::Scope<'a,'_>,file_path:std::path::PathBuf,send:std::sync::mpsc::Sender<ThreadResult>,physics_data:&'a PhysicsData){
s.spawn(move ||{
let result=read_and_run(file_path,physics_data);
// send when thread is complete
send.send(result).unwrap();
});
}
fn get_file_path(dir_entry:std::fs::DirEntry)->Result<Option<std::path::PathBuf>,std::io::Error>{
Ok(dir_entry.file_type()?.is_file().then_some(
dir_entry.path()
))
}
fn test_determinism()->Result<(),ReplayError>{
let thread_limit=std::thread::available_parallelism()?.get();
println!("loading map file..");
let data=read_entire_file("../tools/bhop_maps/5692113331.snfm")?;
let map=strafesnet_snf::read_map(data)?.into_complete_map()?;
let mut physics_data=PhysicsData::default();
println!("generating models..");
physics_data.generate_models(&map);
let (send,recv)=std::sync::mpsc::channel();
let mut read_dir=std::fs::read_dir("../tools/replays")?;
// promise that &physics_data will outlive the spawned threads
let thread_results=std::thread::scope(|s|{
let mut thread_results=Vec::new();
// spawn threads
println!("spawning up to {thread_limit} threads...");
let mut active_thread_count=0;
while active_thread_count<thread_limit{
if let Some(dir_entry_result)=read_dir.next(){
if let Some(file_path)=get_file_path(dir_entry_result?)?{
active_thread_count+=1;
do_thread(s,file_path,send.clone(),&physics_data);
}
}else{
break;
}
}
// spawn another thread every time a message is received from the channel
println!("riding parallelism wave...");
while let Some(dir_entry_result)=read_dir.next(){
if let Some(file_path)=get_file_path(dir_entry_result?)?{
// wait for a thread to complete
thread_results.push(recv.recv().unwrap());
do_thread(s,file_path,send.clone(),&physics_data);
}
}
// wait for remaining threads to complete
println!("waiting for all threads to complete...");
for _ in 0..active_thread_count{
thread_results.push(recv.recv().unwrap());
}
println!("done.");
Ok::<_,ReplayError>(thread_results)
})?;
// tally results
#[derive(Default)]
struct Totals{
deterministic:u32,
nondeterministic:u32,
invalid:u32,
error:u32,
}
let mut nanoseconds=0;
let mut ticks=0;
let Totals{deterministic,nondeterministic,invalid,error}=thread_results.into_iter().fold(Totals::default(),|mut totals,result|{
match result{
Ok(Some(segment_result))=>{
ticks+=segment_result.ticks;
nanoseconds+=segment_result.nanoseconds;
match segment_result.determinism{
DeterminismResult::Deterministic=>totals.deterministic+=1,
DeterminismResult::NonDeterministic=>totals.nondeterministic+=1,
}
},
Ok(None)=>totals.invalid+=1,
Err(_)=>totals.error+=1,
}
totals
});
println!("deterministic={deterministic}");
println!("nondeterministic={nondeterministic}");
println!("invalid={invalid}");
println!("error={error}");
println!("average ticks/s per core: {}",ticks*1_000_000_000/nanoseconds);
assert!(nondeterministic==0);
assert!(invalid==0);
assert!(error==0);
Ok(())
}

@@ -1,7 +1,7 @@
 [package]
 name = "strafesnet_bsp_loader"
-version = "0.3.0"
-edition = "2024"
+version = "0.2.2"
+edition = "2021"
 repository = "https://git.itzana.me/StrafesNET/strafe-project"
 license = "MIT OR Apache-2.0"
 description = "Convert Valve BSP files to StrafesNET data structures."
@@ -10,10 +10,7 @@ authors = ["Rhys Lloyd <krakow20@gmail.com>"]
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
-glam = "0.30.0"
-strafesnet_common = { version = "0.6.0", path = "../common", registry = "strafesnet" }
-strafesnet_deferred_loader = { version = "0.5.0", path = "../deferred_loader", registry = "strafesnet" }
-vbsp = "0.8.0"
-vbsp-entities-css = "0.6.0"
+glam = "0.29.0"
+strafesnet_common = { path = "../common", registry = "strafesnet" }
+vbsp = "0.6.0"
 vmdl = "0.2.0"
-vpk = "0.3.0"

@@ -1,343 +0,0 @@
use strafesnet_common::integer::Planar64;
use strafesnet_common::{model,integer};
use strafesnet_common::integer::{vec3::Vector3,Fixed,Ratio};
use crate::{valve_transform_normal,valve_transform_dist};
#[derive(Hash,Eq,PartialEq)]
struct Face{
normal:integer::Planar64Vec3,
dot:integer::Planar64,
}
#[derive(Debug)]
struct Faces{
faces:Vec<Vec<integer::Planar64Vec3>>,
}
fn solve3(c0:&Face,c1:&Face,c2:&Face)->Option<Ratio<Vector3<Fixed<3,96>>,Fixed<3,96>>>{
let n0_n1=c0.normal.cross(c1.normal);
let det=c2.normal.dot(n0_n1);
if det.abs().is_zero(){
return None;
}
Some((
c1.normal.cross(c2.normal)*c0.dot
+c2.normal.cross(c0.normal)*c1.dot
+c0.normal.cross(c1.normal)*c2.dot
)/det)
}
#[derive(Debug)]
pub enum PlanesToFacesError{
InitFace1,
InitFace2,
InitIntersection,
FindNewIntersection,
// Narrow(strafesnet_common::integer::NarrowError),
EmptyFaces,
InfiniteLoop1,
InfiniteLoop2,
}
impl std::fmt::Display for PlanesToFacesError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl core::error::Error for PlanesToFacesError{}
fn planes_to_faces(face_list:std::collections::HashSet<Face>)->Result<Faces,PlanesToFacesError>{
let mut faces=Vec::new();
// for each face, determine one edge at a time until you complete the face
'face: for face0 in &face_list{
// 1. find first edge
// 2. follow edges around face
// === finding first edge ===
// 1. pick the most perpendicular set of 3 faces
// 2. check if any faces occlude the intersection
// 3. use this test to replace left and right alternating until they are not occluded
// find the most perpendicular face to face0
let mut face1=face_list.iter().min_by_key(|&p|{
face0.normal.dot(p.normal).abs()
}).ok_or(PlanesToFacesError::InitFace1)?;
// direction of edge formed by face0 x face1
let edge_dir=face0.normal.cross(face1.normal);
// find the most perpendicular face to both face0 and face1
let mut face2=face_list.iter().max_by_key(|&p|{
// find the best *oriented* face (no .abs())
edge_dir.dot(p.normal)
}).ok_or(PlanesToFacesError::InitFace2)?;
let mut detect_loop=200u8;
let mut intersection=solve3(face0,face1,face2).ok_or(PlanesToFacesError::InitIntersection)?;
// repeatedly update face1, face2 until all faces form part of the convex solid
'find: loop{
detect_loop=detect_loop.checked_sub(1).ok_or(PlanesToFacesError::InfiniteLoop1)?;
// test if any *other* faces occlude the intersection
for new_face in &face_list{
// new face occludes intersection point
if (new_face.dot.widen_2()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
// replace one of the faces with the new face
// dont' try to replace face0 because we are exploring that face in particular
if let Some(new_intersection)=solve3(face0,new_face,face2){
// face1 does not occlude (or intersect) the new intersection
if (face1.dot.widen_2()/Planar64::ONE).gt_ratio(face1.normal.dot(new_intersection.num)/new_intersection.den){
face1=new_face;
intersection=new_intersection;
continue 'find;
}
}
if let Some(new_intersection)=solve3(face0,face1,new_face){
// face2 does not occlude (or intersect) the new intersection
if (face2.dot.widen_2()/Planar64::ONE).gt_ratio(face2.normal.dot(new_intersection.num)/new_intersection.den){
face2=new_face;
intersection=new_intersection;
continue 'find;
}
}
}
}
// we have found a set of faces for which the intersection is on the convex solid
break 'find;
}
// check if face0 must go, meaning it is a degenerate face and does not contribute anything to the convex solid
for new_face in &face_list{
if core::ptr::eq(face0,new_face){
continue;
}
if core::ptr::eq(face1,new_face){
continue;
}
if core::ptr::eq(face2,new_face){
continue;
}
// new_face occludes intersection meaning intersection is not on convex solid and face0 is degenrate
if (new_face.dot.widen_2()/Planar64::ONE).lt_ratio(new_face.normal.dot(intersection.num)/intersection.den){
// abort! reject face0 entirely
continue 'face;
}
}
// === follow edges around face ===
// Note that we chose face2 such that the 3 faces create a particular winding order.
// If we choose a consistent face to follow (face1, face2) it will always wind with a consistent chirality
let mut detect_loop=200u8;
// keep looping until we meet this face again
let face1=face1;
let mut face=Vec::new();
loop{
// push point onto vertices
// problem: this may push a vertex that does not fit in the fixed point range and is thus meaningless
face.push(intersection.divide().narrow_1().unwrap());
// we looped back around to face1, we're done!
if core::ptr::eq(face1,face2){
break;
}
// the measure
let edge_dir=face0.normal.cross(face2.normal);
// the dot product to beat
let d_intersection=edge_dir.dot(intersection.num)/intersection.den;
// find the next face moving clockwise around face0
let (new_face,new_intersection,_)=face_list.iter().filter_map(|new_face|{
// ignore faces that are part of the current edge
if core::ptr::eq(face0,new_face)
|core::ptr::eq(face2,new_face){
return None;
}
let new_intersection=solve3(face0,face2,new_face)?;
// the d value must be larger
let d_new_intersection=edge_dir.dot(new_intersection.num)/new_intersection.den;
if d_new_intersection.le_ratio(d_intersection){
return None;
}
Some((new_face,new_intersection,d_new_intersection))
}).min_by_key(|&(_,_,d)|d).ok_or(PlanesToFacesError::FindNewIntersection)?;
face2=new_face;
intersection=new_intersection;
detect_loop=detect_loop.checked_sub(1).ok_or(PlanesToFacesError::InfiniteLoop2)?;
}
faces.push(face);
}
if faces.is_empty(){
Err(PlanesToFacesError::EmptyFaces)
}else{
Ok(Faces{
faces,
})
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub enum BrushToMeshError{
SliceBrushSides,
MissingPlane,
InvalidFaceCount{
count:usize,
},
InvalidPlanes(PlanesToFacesError),
}
impl std::fmt::Display for BrushToMeshError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl core::error::Error for BrushToMeshError{}
pub fn faces_to_mesh(faces:Vec<Vec<integer::Planar64Vec3>>)->model::Mesh{
// generate the mesh
let mut mb=model::MeshBuilder::new();
let color=mb.acquire_color_id(glam::Vec4::ONE);
let tex=mb.acquire_tex_id(glam::Vec2::ZERO);
// normals are ignored by physics
let normal=mb.acquire_normal_id(integer::vec3::ZERO);
let polygon_list=faces.into_iter().map(|face|{
face.into_iter().map(|pos|{
let pos=mb.acquire_pos_id(pos);
mb.acquire_vertex_id(model::IndexedVertex{
pos,
tex,
normal,
color,
})
}).collect()
}).collect();
let polygon_groups=vec![model::PolygonGroup::PolygonList(model::PolygonList::new(polygon_list))];
let physics_groups=vec![model::IndexedPhysicsGroup{
groups:vec![model::PolygonGroupId::new(0)],
}];
let graphics_groups=vec![];
mb.build(polygon_groups,graphics_groups,physics_groups)
}
pub fn brush_to_mesh(bsp:&vbsp::Bsp,brush:&vbsp::Brush)->Result<model::Mesh,BrushToMeshError>{
let brush_start_idx=brush.brush_side as usize;
let sides_range=brush_start_idx..brush_start_idx+brush.num_brush_sides as usize;
let sides=bsp.brush_sides.get(sides_range).ok_or(BrushToMeshError::SliceBrushSides)?;
let face_list=sides.iter().map(|side|{
// The so-called tumor brushes have TRIGGER bit set
// but also ignore visleaf hint and skip sides
const TUMOR:vbsp::TextureFlags=vbsp::TextureFlags::HINT.union(vbsp::TextureFlags::SKIP).union(vbsp::TextureFlags::TRIGGER);
if let Some(texture_info)=bsp.texture_info(side.texture_info as usize){
if texture_info.flags.intersects(TUMOR){
return None;
}
}
let plane=bsp.plane(side.plane as usize)?;
Some(Face{
normal:valve_transform_normal(plane.normal.into()),
dot:valve_transform_dist(plane.dist.into()),
})
}).collect::<Option<std::collections::HashSet<_>>>().ok_or(BrushToMeshError::MissingPlane)?;
if face_list.len()<4{
return Err(BrushToMeshError::InvalidFaceCount{count:face_list.len()});
}
let faces=planes_to_faces(face_list).map_err(BrushToMeshError::InvalidPlanes)?;
let mesh=faces_to_mesh(faces.faces);
Ok(mesh)
}
pub fn unit_cube()->model::Mesh{
let face_list=[
Face{normal:integer::vec3::X,dot:Planar64::ONE},
Face{normal:integer::vec3::Y,dot:Planar64::ONE},
Face{normal:integer::vec3::Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Y,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::ONE},
].into_iter().collect();
let faces=planes_to_faces(face_list).unwrap();
let mesh=faces_to_mesh(faces.faces);
mesh
}
#[cfg(test)]
mod test{
use super::*;
#[test]
fn test_cube(){
let face_list=[
Face{normal:integer::vec3::X,dot:Planar64::ONE},
Face{normal:integer::vec3::Y,dot:Planar64::ONE},
Face{normal:integer::vec3::Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Y,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::ONE},
].into_iter().collect();
let faces=planes_to_faces(face_list).unwrap();
assert_eq!(faces.faces.len(),6);
dbg!(faces);
}
#[test]
fn test_cube_with_degernate_face(){
let face_list=[
Face{normal:integer::vec3::X,dot:Planar64::ONE},
Face{normal:integer::vec3::Y,dot:Planar64::ONE},
Face{normal:integer::vec3::Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Y,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::EPSILON},
].into_iter().collect();
let faces=planes_to_faces(face_list).unwrap();
assert_eq!(faces.faces.len(),6);
dbg!(faces);
}
#[test]
fn test_cube_with_degernate_face2(){
let face_list=[
Face{normal:integer::vec3::X,dot:Planar64::ONE},
Face{normal:integer::vec3::Y,dot:Planar64::ONE},
Face{normal:integer::vec3::Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Y,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X+integer::vec3::NEG_Z,dot:-Planar64::EPSILON},
].into_iter().collect();
let faces=planes_to_faces(face_list).unwrap();
assert_eq!(faces.faces.len(),5);
dbg!(faces);
}
#[test]
fn test_cube_with_degernate_face3(){
let face_list=[
Face{normal:integer::vec3::X,dot:Planar64::ONE},
Face{normal:integer::vec3::Y,dot:Planar64::ONE},
Face{normal:integer::vec3::Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Y,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_Z,dot:Planar64::ONE},
Face{normal:integer::vec3::NEG_X+integer::vec3::NEG_Z,dot:Planar64::EPSILON},
].into_iter().collect();
let faces=planes_to_faces(face_list).unwrap();
assert_eq!(faces.faces.len(),7);
dbg!(faces);
}
}

@@ -1,142 +1,43 @@
use std::borrow::Cow; use strafesnet_common::{map,model,integer,gameplay_attributes};
use vbsp_entities_css::Entity; const VALVE_SCALE:f32=1.0/16.0;
fn valve_transform([x,y,z]:[f32;3])->integer::Planar64Vec3{
use strafesnet_common::{map,model,integer,gameplay_attributes as attr}; integer::vec3::try_from_f32_array([x*VALVE_SCALE,z*VALVE_SCALE,-y*VALVE_SCALE]).unwrap()
use strafesnet_deferred_loader::deferred_loader::{MeshDeferredLoader,RenderConfigDeferredLoader};
use strafesnet_deferred_loader::mesh::Meshes;
use strafesnet_deferred_loader::texture::{RenderConfigs,Texture};
use strafesnet_common::gameplay_modes::{NormalizedMode,NormalizedModes,Mode,Stage};
use crate::valve_transform;
fn ingest_vertex(
mb:&mut model::MeshBuilder,
world_position:vbsp::Vector,
texture_transform_u:glam::Vec4,
texture_transform_v:glam::Vec4,
normal:model::NormalId,
color:model::ColorId,
)->model::VertexId{
//world_model.origin seems to always be 0,0,0
let vertex_xyz=world_position.into();
let pos=mb.acquire_pos_id(valve_transform(vertex_xyz));
//calculate texture coordinates
let pos_4d=glam::Vec3::from_array(vertex_xyz).extend(1.0);
let tex=glam::vec2(texture_transform_u.dot(pos_4d),texture_transform_v.dot(pos_4d));
let tex=mb.acquire_tex_id(tex);
mb.acquire_vertex_id(model::IndexedVertex{
pos,
tex,
normal,
color,
})
} }
pub fn convert_bsp<AcquireRenderConfigId,AcquireMeshId>(
fn add_brush<'a>( bsp:&vbsp::Bsp,
mesh_deferred_loader:&mut MeshDeferredLoader<&'a str>, mut acquire_render_config_id:AcquireRenderConfigId,
world_models:&mut Vec<model::Model>, mut acquire_mesh_id:AcquireMeshId
prop_models:&mut Vec<model::Model>, )->PartialMap1
model:&'a str, where
origin:vbsp::Vector, AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
rendercolor:vbsp::Color, AcquireMeshId:FnMut(&str)->model::MeshId,
attributes:attr::CollisionAttributesId, {
){
let transform=integer::Planar64Affine3::from_translation(
valve_transform(origin.into())
);
let color=(glam::Vec3::from_array([
rendercolor.r as f32,
rendercolor.g as f32,
rendercolor.b as f32
])/255.0).extend(1.0);
match model.chars().next(){
// The first character of brush.model is '*'
Some('*')=>match model[1..].parse(){
Ok(mesh_id)=>{
let mesh=model::MeshId::new(mesh_id);
world_models.push(
model::Model{mesh,attributes,transform,color}
);
},
Err(e)=>{
println!("Brush model int parse error: {e} model={model}");
return;
},
},
_=>{
let mesh=mesh_deferred_loader.acquire_mesh_id(model);
prop_models.push(
model::Model{mesh,attributes,transform,color}
);
}
}
}
pub fn convert<'a>(
bsp:&'a crate::Bsp,
render_config_deferred_loader:&mut RenderConfigDeferredLoader<Cow<'a,str>>,
mesh_deferred_loader:&mut MeshDeferredLoader<&'a str>,
)->PartialMap1{
let bsp=bsp.as_ref();
//figure out real attributes later //figure out real attributes later
let unique_attributes=vec![ let mut unique_attributes=Vec::new();
attr::CollisionAttributes::Decoration, unique_attributes.push(gameplay_attributes::CollisionAttributes::Decoration);
attr::CollisionAttributes::contact_default(), const TEMP_TOUCH_ME_ATTRIBUTE:gameplay_attributes::CollisionAttributesId=gameplay_attributes::CollisionAttributesId::new(0);
attr::CollisionAttributes::intersect_default(),
// ladder
attr::CollisionAttributes::Contact(
attr::ContactAttributes{
contacting:attr::ContactingAttributes{
contact_behaviour:Some(attr::ContactingBehaviour::Ladder(
attr::ContactingLadder{sticky:true}
))
},
general:attr::GeneralAttributes::default(),
}
),
// water
attr::CollisionAttributes::Intersect(
attr::IntersectAttributes{
intersecting:attr::IntersectingAttributes{
water:Some(attr::IntersectingWater{
viscosity:integer::Planar64::ONE,
density:integer::Planar64::ONE,
velocity:integer::vec3::ZERO,
}),
},
general:attr::GeneralAttributes::default(),
}
),
];
const ATTRIBUTE_DECORATION:attr::CollisionAttributesId=attr::CollisionAttributesId::new(0);
const ATTRIBUTE_CONTACT_DEFAULT:attr::CollisionAttributesId=attr::CollisionAttributesId::new(1);
const ATTRIBUTE_INTERSECT_DEFAULT:attr::CollisionAttributesId=attr::CollisionAttributesId::new(2);
const ATTRIBUTE_LADDER_DEFAULT:attr::CollisionAttributesId=attr::CollisionAttributesId::new(3);
const ATTRIBUTE_WATER_DEFAULT:attr::CollisionAttributesId=attr::CollisionAttributesId::new(4);
let mut prop_mesh_count=0;
//declare all prop models to Loader //declare all prop models to Loader
let mut prop_models=bsp.static_props().map(|prop|{ let prop_models=bsp.static_props().map(|prop|{
const DEG_TO_RAD:f32=std::f32::consts::TAU/360.0;
//get or create mesh_id //get or create mesh_id
let mesh_id=mesh_deferred_loader.acquire_mesh_id(prop.model()); let mesh_id=acquire_mesh_id(prop.model());
//not the most failsafe code but this is just for the map tool lmao
if prop_mesh_count==mesh_id.get(){
prop_mesh_count+=1;
};
let placement=prop.as_prop_placement();
model::Model{ model::Model{
mesh:mesh_id, mesh:mesh_id,
attributes:ATTRIBUTE_DECORATION, attributes:TEMP_TOUCH_ME_ATTRIBUTE,
transform:integer::Planar64Affine3::new( transform:integer::Planar64Affine3::new(
integer::mat3::try_from_f32_array_2d( integer::mat3::try_from_f32_array_2d((
glam::Mat3A::from_diagonal(glam::Vec3::splat(placement.scale))
//TODO: figure this out //TODO: figure this out
glam::Mat3A::from_euler( *glam::Mat3A::from_quat(glam::Quat::from_array(placement.rotation.into()))
glam::EulerRot::XYZ, ).to_cols_array_2d()).unwrap(),
prop.angles.pitch*DEG_TO_RAD, valve_transform(placement.origin.into()),
prop.angles.yaw*DEG_TO_RAD,
prop.angles.roll*DEG_TO_RAD
).to_cols_array_2d()
).unwrap(),
valve_transform(prop.origin.into()),
), ),
color:glam::Vec4::ONE, color:glam::Vec4::ONE,
} }
@@ -146,12 +47,14 @@ pub fn convert<'a>(
//the generated MeshIds in here will collide with the Loader Mesh Ids //the generated MeshIds in here will collide with the Loader Mesh Ids
//but I can't think of a good workaround other than just remapping one later. //but I can't think of a good workaround other than just remapping one later.
let mut world_meshes:Vec<model::Mesh>=bsp.models().map(|world_model|{ let world_meshes:Vec<model::Mesh>=bsp.models().map(|world_model|{
let mut mb=model::MeshBuilder::new(); //non-deduplicated
let mut spam_pos=Vec::new();
let color=mb.acquire_color_id(glam::Vec4::ONE); let mut spam_tex=Vec::new();
let mut spam_normal=Vec::new();
let mut spam_vertices=Vec::new();
let mut graphics_groups=Vec::new(); let mut graphics_groups=Vec::new();
let mut render_id_to_graphics_group_id=std::collections::HashMap::new(); let mut physics_group=model::IndexedPhysicsGroup::default();
let polygon_groups=world_model.faces().enumerate().map(|(polygon_group_id,face)|{ let polygon_groups=world_model.faces().enumerate().map(|(polygon_group_id,face)|{
let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32); let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32);
let face_texture=face.texture(); let face_texture=face.texture();
@@ -160,200 +63,139 @@ pub fn convert<'a>(
let texture_transform_u=glam::Vec4::from_array(face_texture.texture_transforms_u)/(face_texture_data.width as f32); let texture_transform_u=glam::Vec4::from_array(face_texture.texture_transforms_u)/(face_texture_data.width as f32);
let texture_transform_v=glam::Vec4::from_array(face_texture.texture_transforms_v)/(face_texture_data.height as f32); let texture_transform_v=glam::Vec4::from_array(face_texture.texture_transforms_v)/(face_texture_data.height as f32);
//this automatically figures out what the texture is trying to do and creates
//a render config for it, and then returns the id to that render config
let render_id=acquire_render_config_id(Some(face_texture_data.name()));
//normal //normal
let normal=mb.acquire_normal_id(valve_transform(face.normal().into())); let normal=face.normal();
let mut polygon_iter=face.vertex_positions().map(|vertex_position| let normal_idx=spam_normal.len() as u32;
world_model.origin+vertex_position spam_normal.push(valve_transform(normal.into()));
); let mut polygon_iter=face.vertex_positions().map(|vertex_position|{
//world_model.origin seems to always be 0,0,0
let vertex_xyz=(world_model.origin+vertex_position).into();
let pos_idx=spam_pos.len();
spam_pos.push(valve_transform(vertex_xyz));
//calculate texture coordinates
let pos=glam::Vec3::from_array(vertex_xyz).extend(1.0);
let tex=glam::vec2(texture_transform_u.dot(pos),texture_transform_v.dot(pos));
let tex_idx=spam_tex.len() as u32;
spam_tex.push(tex);
let vertex_id=model::VertexId::new(spam_vertices.len() as u32);
spam_vertices.push(model::IndexedVertex{
pos:model::PositionId::new(pos_idx as u32),
tex:model::TextureCoordinateId::new(tex_idx as u32),
normal:model::NormalId::new(normal_idx),
color:model::ColorId::new(0),
});
vertex_id
});
let polygon_list=std::iter::from_fn(move||{ let polygon_list=std::iter::from_fn(move||{
match (polygon_iter.next(),polygon_iter.next(),polygon_iter.next()){ match (polygon_iter.next(),polygon_iter.next(),polygon_iter.next()){
(Some(v1),Some(v2),Some(v3))=>Some([v1,v2,v3]), (Some(v1),Some(v2),Some(v3))=>Some(vec![v1,v2,v3]),
//ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate //ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate
_=>None, _=>None,
} }
}).map(|triplet|{
triplet.map(|world_position|
ingest_vertex(&mut mb,world_position,texture_transform_u,texture_transform_v,normal,color)
).to_vec()
}).collect(); }).collect();
if face.is_visible(){ if face.is_visible(){
//this automatically figures out what the texture is trying to do and creates //TODO: deduplicate graphics groups by render id
//a render config for it, and then returns the id to that render config
let render_id=render_config_deferred_loader.acquire_render_config_id(Some(Cow::Borrowed(face_texture_data.name())));
//deduplicate graphics groups by render id
let graphics_group_id=*render_id_to_graphics_group_id.entry(render_id).or_insert_with(||{
let graphics_group_id=graphics_groups.len();
graphics_groups.push(model::IndexedGraphicsGroup{ graphics_groups.push(model::IndexedGraphicsGroup{
render:render_id, render:render_id,
groups:vec![], groups:vec![polygon_group_id],
}); })
graphics_group_id
});
graphics_groups[graphics_group_id].groups.push(polygon_group_id);
} }
physics_group.groups.push(polygon_group_id);
model::PolygonGroup::PolygonList(model::PolygonList::new(polygon_list)) model::PolygonGroup::PolygonList(model::PolygonList::new(polygon_list))
}).collect(); }).collect();
model::Mesh{
mb.build(polygon_groups,graphics_groups,vec![]) unique_pos:spam_pos,
unique_tex:spam_tex,
unique_normal:spam_normal,
unique_color:vec![glam::Vec4::ONE],
unique_vertices:spam_vertices,
polygon_groups,
graphics_groups,
physics_groups:vec![physics_group],
}
}).collect(); }).collect();
+	let world_models:Vec<model::Model>=
+	//one instance of the main world mesh
+	std::iter::once((
+		//world_model
+		model::MeshId::new(0),
+		//model_origin
+		vbsp::Vector::from([0.0,0.0,0.0]),
+		//model_color
+		vbsp::Color{r:255,g:255,b:255},
+	)).chain(
+		//entities sprinkle instances of the other meshes around
+		bsp.entities.iter()
+		.flat_map(|ent|ent.parse())//ignore entity parsing errors
+		.filter_map(|ent|match ent{
+			vbsp::Entity::Brush(brush)=>Some(brush),
+			vbsp::Entity::BrushIllusionary(brush)=>Some(brush),
+			vbsp::Entity::BrushWall(brush)=>Some(brush),
+			vbsp::Entity::BrushWallToggle(brush)=>Some(brush),
+			_=>None,
+		}).flat_map(|brush|
+			//The first character of brush.model is '*'
+			brush.model[1..].parse().map(|mesh_id|//ignore parse int errors
+				(model::MeshId::new(mesh_id),brush.origin,brush.color)
+			)
+		)
+	).map(|(mesh_id,model_origin,vbsp::Color{r,g,b})|{
+		model::Model{
-	let mut found_spawn=None;
-	let mut world_models=Vec::new();
-	// the one and only world model 0
-	world_models.push(model::Model{
-		mesh:model::MeshId::new(0),
-		attributes:ATTRIBUTE_DECORATION,
-		transform:integer::Planar64Affine3::IDENTITY,
-		color:glam::Vec4::W,
-	});
-	const WHITE:vbsp::Color=vbsp::Color{r:255,g:255,b:255};
-	for raw_ent in &bsp.entities{
-		match raw_ent.parse(){
-			Ok(Entity::Cycler(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::EnvSprite(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncBreakable(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncBrush(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncButton(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncDoor(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncDoorRotating(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncIllusionary(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncMonitor(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncMovelinear(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncPhysbox(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
-			Ok(Entity::FuncPhysboxMultiplayer(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncRotButton(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::FuncRotating(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncTracktrain(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncTrain(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncWall(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin.unwrap_or_default(),brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncWallToggle(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin.unwrap_or_default(),brush.rendercolor,ATTRIBUTE_DECORATION),
Ok(Entity::FuncWaterAnalog(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,brush.rendercolor.unwrap_or(WHITE),ATTRIBUTE_DECORATION),
Ok(Entity::PropDoorRotating(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropDynamic(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropDynamicOverride(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropPhysics(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropPhysicsMultiplayer(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropPhysicsOverride(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::PropRagdoll(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerGravity(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerHurt(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerLook(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerMultiple(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model.unwrap_or_default(),brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerOnce(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerProximity(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerPush(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerSoundscape(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerTeleport(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model.unwrap_or_default(),brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerVphysicsMotion(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::TriggerWind(brush))=>add_brush(mesh_deferred_loader,&mut world_models,&mut prop_models,brush.model,brush.origin,WHITE,ATTRIBUTE_DECORATION),
Ok(Entity::InfoPlayerCounterterrorist(spawn))=>{
found_spawn=Some(valve_transform(spawn.origin.into()));
},
Ok(Entity::InfoPlayerTerrorist(spawn))=>{
found_spawn=Some(valve_transform(spawn.origin.into()));
},
Err(e)=>{
println!("Bsp Entity parse error: {e}");
},
_=>(),
}
}
// physics models
for brush in &bsp.brushes{
const RELEVANT:vbsp::BrushFlags=
vbsp::BrushFlags::SOLID
.union(vbsp::BrushFlags::PLAYERCLIP)
.union(vbsp::BrushFlags::WATER)
.union(vbsp::BrushFlags::MOVEABLE)
.union(vbsp::BrushFlags::LADDER);
if !brush.flags.intersects(RELEVANT){
continue;
}
let is_ladder=brush.flags.contains(vbsp::BrushFlags::LADDER);
let is_water=brush.flags.contains(vbsp::BrushFlags::WATER);
let attributes=match (is_ladder,is_water){
(true,false)=>ATTRIBUTE_LADDER_DEFAULT,
(false,true)=>ATTRIBUTE_WATER_DEFAULT,
(false,false)=>ATTRIBUTE_CONTACT_DEFAULT,
(true,true)=>{
// water ladder? wtf
println!("brush is a water ladder o_o defaulting to ladder");
ATTRIBUTE_LADDER_DEFAULT
}
};
let mesh_result=crate::brush::brush_to_mesh(bsp,brush);
match mesh_result{
Ok(mesh)=>{
let mesh_id=model::MeshId::new(world_meshes.len() as u32);
world_meshes.push(mesh);
world_models.push(model::Model{
 				mesh:mesh_id,
-				attributes,
+				attributes:TEMP_TOUCH_ME_ATTRIBUTE,
 				transform:integer::Planar64Affine3::new(
 					integer::mat3::identity(),
-					integer::vec3::ZERO,
+					valve_transform(model_origin.into())
 				),
-				color:glam::Vec4::ONE,
+				color:(glam::Vec3::from_array([r as f32,g as f32,b as f32])/255.0).extend(1.0),
});
},
Err(e)=>println!("Brush mesh error: {e}"),
}
}
let mut modes_list=Vec::new();
if let Some(spawn_point)=found_spawn{
// create a new mesh
let mesh_id=model::MeshId::new(world_meshes.len() as u32);
world_meshes.push(crate::brush::unit_cube());
// create a new model
let model_id=model::ModelId::new(world_models.len() as u32);
world_models.push(model::Model{
mesh:mesh_id,
attributes:ATTRIBUTE_INTERSECT_DEFAULT,
transform:integer::Planar64Affine3::from_translation(spawn_point),
color:glam::Vec4::W,
});
let first_stage=Stage::empty(model_id);
let main_mode=Mode::new(
strafesnet_common::gameplay_style::StyleModifiers::source_bhop(),
model_id,
std::collections::HashMap::new(),
vec![first_stage],
std::collections::HashMap::new(),
);
modes_list.push(NormalizedMode::new(main_mode));
 	}
+	}).collect();
 	PartialMap1{
 		attributes:unique_attributes,
 		world_meshes,
 		prop_models,
 		world_models,
-		modes:NormalizedModes::new(modes_list),
+		modes:strafesnet_common::gameplay_modes::Modes::new(Vec::new()),
 	}
 }
 //partially constructed map types
 pub struct PartialMap1{
-	attributes:Vec<attr::CollisionAttributes>,
+	attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
 	prop_models:Vec<model::Model>,
 	world_meshes:Vec<model::Mesh>,
 	world_models:Vec<model::Model>,
-	modes:NormalizedModes,
+	modes:strafesnet_common::gameplay_modes::Modes,
 }
 impl PartialMap1{
-	pub fn add_prop_meshes<'a>(
+	pub fn add_prop_meshes<AcquireRenderConfigId>(
 		self,
-		prop_meshes:Meshes,
-	)->PartialMap2{
+		prop_meshes:impl IntoIterator<Item=(model::MeshId,crate::data::ModelData)>,
+		mut acquire_render_config_id:AcquireRenderConfigId,
+	)->PartialMap2
+	where
+		AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
+	{
 		PartialMap2{
 			attributes:self.attributes,
-			prop_meshes:prop_meshes.consume().collect(),
+			prop_meshes:prop_meshes.into_iter().filter_map(|(mesh_id,model_data)|
+				//this will generate new render ids and texture ids
+				match convert_mesh(model_data,&mut acquire_render_config_id){
+					Ok(mesh)=>Some((mesh_id,mesh)),
+					Err(e)=>{
+						println!("error converting mesh: {e}");
+						None
+					}
+				}
+			).collect(),
 			prop_models:self.prop_models,
 			world_meshes:self.world_meshes,
 			world_models:self.world_models,
@@ -362,17 +204,18 @@ impl PartialMap1{
 	}
 }
 pub struct PartialMap2{
-	attributes:Vec<attr::CollisionAttributes>,
+	attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
 	prop_meshes:Vec<(model::MeshId,model::Mesh)>,
 	prop_models:Vec<model::Model>,
 	world_meshes:Vec<model::Mesh>,
 	world_models:Vec<model::Model>,
-	modes:NormalizedModes,
+	modes:strafesnet_common::gameplay_modes::Modes,
 }
 impl PartialMap2{
 	pub fn add_render_configs_and_textures(
 		mut self,
-		render_configs:RenderConfigs,
+		render_configs:impl IntoIterator<Item=(model::RenderConfigId,model::RenderConfig)>,
+		textures:impl IntoIterator<Item=(model::TextureId,Vec<u8>)>,
 	)->map::CompleteMap{
 		//merge mesh and model lists, flatten and remap all ids
 		let mesh_id_offset=self.world_meshes.len();
@@ -391,14 +234,13 @@ impl PartialMap2{
 		})
 	));
 	//let mut models=Vec::new();
-	let (textures,render_configs)=render_configs.consume();
 	let (textures,texture_id_map):(Vec<Vec<u8>>,std::collections::HashMap<model::TextureId,model::TextureId>)
 		=textures.into_iter()
 		//.filter_map(f) cull unused textures
-		.enumerate().map(|(new_texture_id,(old_texture_id,Texture::ImageDDS(texture)))|{
+		.enumerate().map(|(new_texture_id,(old_texture_id,texture))|{
 			(texture,(old_texture_id,model::TextureId::new(new_texture_id as u32)))
 		}).unzip();
-	let render_configs=render_configs.into_iter().map(|(_render_config_id,mut render_config)|{
+	let render_configs=render_configs.into_iter().map(|(render_config_id,mut render_config)|{
 		//this may generate duplicate no-texture render configs but idc
 		render_config.texture=render_config.texture.and_then(|texture_id|
 			texture_id_map.get(&texture_id).copied()
@@ -415,3 +257,77 @@ impl PartialMap2{
 		}
 	}
 }
fn convert_mesh<AcquireRenderConfigId>(
model_data:crate::data::ModelData,
acquire_render_config_id:&mut AcquireRenderConfigId,
)->Result<model::Mesh,vmdl::ModelError>
where
AcquireRenderConfigId:FnMut(Option<&str>)->model::RenderConfigId,
{
let model=model_data.read_model()?;
let texture_paths=model.texture_directories();
if texture_paths.len()!=1{
println!("WARNING: multiple texture paths");
}
let skin=model.skin_tables().nth(0).unwrap();
let mut spam_pos=Vec::with_capacity(model.vertices().len());
let mut spam_normal=Vec::with_capacity(model.vertices().len());
let mut spam_tex=Vec::with_capacity(model.vertices().len());
let mut spam_vertices=Vec::with_capacity(model.vertices().len());
for (i,vertex) in model.vertices().iter().enumerate(){
spam_pos.push(valve_transform(vertex.position.into()));
spam_normal.push(valve_transform(vertex.normal.into()));
spam_tex.push(glam::Vec2::from_array(vertex.texture_coordinates));
spam_vertices.push(model::IndexedVertex{
pos:model::PositionId::new(i as u32),
tex:model::TextureCoordinateId::new(i as u32),
normal:model::NormalId::new(i as u32),
color:model::ColorId::new(0),
});
}
let mut graphics_groups=Vec::new();
let mut physics_groups=Vec::new();
let polygon_groups=model.meshes().enumerate().map(|(polygon_group_id,mesh)|{
let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32);
let render_id=if let (Some(texture_path),Some(texture_name))=(texture_paths.get(0),skin.texture(mesh.material_index())){
let mut path=std::path::PathBuf::from(texture_path.as_str());
path.push(texture_name);
acquire_render_config_id(path.as_os_str().to_str())
}else{
acquire_render_config_id(None)
};
graphics_groups.push(model::IndexedGraphicsGroup{
render:render_id,
groups:vec![polygon_group_id],
});
physics_groups.push(model::IndexedPhysicsGroup{
groups:vec![polygon_group_id],
});
model::PolygonGroup::PolygonList(model::PolygonList::new(
//looking at the code, it would seem that the strips are pre-deindexed into triangle lists when calling this function
mesh.vertex_strip_indices().flat_map(|mut strip|
std::iter::from_fn(move||{
match (strip.next(),strip.next(),strip.next()){
(Some(v1),Some(v2),Some(v3))=>Some([v1,v2,v3].map(|vertex_id|model::VertexId::new(vertex_id as u32)).to_vec()),
//ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate
_=>None,
}
})
).collect()
))
}).collect();
Ok(model::Mesh{
unique_pos:spam_pos,
unique_normal:spam_normal,
unique_tex:spam_tex,
unique_color:vec![glam::Vec4::ONE],
unique_vertices:spam_vertices,
polygon_groups,
graphics_groups,
physics_groups,
})
}
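// A sketch of the kind of closure the AcquireRenderConfigId bound above
// accepts: a map-backed counter that hands out one id per distinct texture
// path. The id is stubbed as a plain u32 here; the real RenderConfigId is a
// newtype in strafesnet_common::model, so this is illustrative only.
use std::collections::HashMap;

fn make_acquire_render_config_id()->impl FnMut(Option<&str>)->u32{
	let mut ids:HashMap<Option<String>,u32>=HashMap::new();
	move|name:Option<&str>|{
		let next=ids.len() as u32;
		*ids.entry(name.map(str::to_owned)).or_insert(next)
	}
}

#[test]
fn acquire_deduplicates_texture_paths(){
	let mut acquire=make_acquire_render_config_id();
	assert_eq!(acquire(Some("concrete/concretefloor01")),0);
	assert_eq!(acquire(None),1);
	// repeated lookups return the id that was assigned the first time
	assert_eq!(acquire(Some("concrete/concretefloor01")),0);
}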

@ -0,0 +1,60 @@
pub struct Bsp(vbsp::Bsp);
impl Bsp{
pub const fn new(value:vbsp::Bsp)->Self{
Self(value)
}
}
impl AsRef<vbsp::Bsp> for Bsp{
fn as_ref(&self)->&vbsp::Bsp{
&self.0
}
}
pub struct MdlData(Vec<u8>);
impl MdlData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for MdlData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct VtxData(Vec<u8>);
impl VtxData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for VtxData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct VvdData(Vec<u8>);
impl VvdData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
}
impl AsRef<[u8]> for VvdData{
fn as_ref(&self)->&[u8]{
self.0.as_ref()
}
}
pub struct ModelData{
pub mdl:MdlData,
pub vtx:VtxData,
pub vvd:VvdData,
}
impl ModelData{
pub fn read_model(&self)->Result<vmdl::Model,vmdl::ModelError>{
Ok(vmdl::Model::from_parts(
vmdl::mdl::Mdl::read(self.mdl.as_ref())?,
vmdl::vtx::Vtx::read(self.vtx.as_ref())?,
vmdl::vvd::Vvd::read(self.vvd.as_ref())?,
))
}
}
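// A hypothetical usage sketch for ModelData; the file names are made up for
// illustration, and in the real pipeline these three buffers come out of the
// BSP pack or a mounted VPK rather than the filesystem.
fn load_example_model()->Result<vmdl::Model,vmdl::ModelError>{
	let model_data=ModelData{
		mdl:MdlData::new(std::fs::read("models/props/example.mdl").expect("missing .mdl")),
		vtx:VtxData::new(std::fs::read("models/props/example.dx90.vtx").expect("missing .vtx")),
		vvd:VvdData::new(std::fs::read("models/props/example.vvd").expect("missing .vvd")),
	};
	model_data.read_model()
}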

@@ -1,20 +1,7 @@
-use strafesnet_deferred_loader::deferred_loader::{LoadFailureMode,MeshDeferredLoader,RenderConfigDeferredLoader};
 mod bsp;
-mod mesh;
-mod brush;
-pub mod loader;
+pub mod data;
+pub use data::Bsp;
-const VALVE_SCALE:f32=1.0/16.0;
pub(crate) fn valve_transform_dist(d:f32)->strafesnet_common::integer::Planar64{
(d*VALVE_SCALE).try_into().unwrap()
}
pub(crate) fn valve_transform_normal([x,y,z]:[f32;3])->strafesnet_common::integer::Planar64Vec3{
strafesnet_common::integer::vec3::try_from_f32_array([x,z,-y]).unwrap()
}
pub(crate) fn valve_transform([x,y,z]:[f32;3])->strafesnet_common::integer::Planar64Vec3{
strafesnet_common::integer::vec3::try_from_f32_array([x*VALVE_SCALE,z*VALVE_SCALE,-y*VALVE_SCALE]).unwrap()
}
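// A float-only illustration of the coordinate convention implemented by
// valve_transform above: scale by 1/16 and convert Source's Z-up to Y-up by
// mapping (x,y,z) -> (x,z,-y). The real functions return fixed-point
// Planar64 values; this f32 copy exists purely to show the mapping.
fn valve_transform_f32([x,y,z]:[f32;3])->[f32;3]{
	const VALVE_SCALE:f32=1.0/16.0;
	[x*VALVE_SCALE,z*VALVE_SCALE,-y*VALVE_SCALE]
}

#[test]
fn sixteen_hammer_units_forward_is_one_unit(){
	// +y in Source becomes -z after the axis swap, and 16 units scale down to 1.0
	assert_eq!(valve_transform_f32([0.0,16.0,0.0]),[0.0,0.0,-1.0]);
}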
 #[derive(Debug)]
 pub enum ReadError{
@@ -28,38 +15,6 @@ impl std::fmt::Display for ReadError{
 }
 impl std::error::Error for ReadError{}
#[derive(Debug)]
pub enum LoadError{
Texture(loader::TextureError),
Mesh(loader::MeshError),
}
impl std::fmt::Display for LoadError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for LoadError{}
impl From<loader::TextureError> for LoadError{
fn from(value:loader::TextureError)->Self{
Self::Texture(value)
}
}
impl From<loader::MeshError> for LoadError{
fn from(value:loader::MeshError)->Self{
Self::Mesh(value)
}
}
pub struct Bsp{
bsp:vbsp::Bsp,
case_folded_file_names:std::collections::HashMap<String,String>,
}
impl AsRef<vbsp::Bsp> for Bsp{
fn as_ref(&self)->&vbsp::Bsp{
&self.bsp
}
}
 pub fn read<R:std::io::Read>(mut input:R)->Result<Bsp,ReadError>{
 	let mut s=Vec::new();
@@ -68,66 +23,15 @@ pub fn read<R:std::io::Read>(mut input:R)->Result<Bsp,ReadError>{
 	vbsp::Bsp::read(s.as_slice()).map(Bsp::new).map_err(ReadError::Bsp)
 }
impl Bsp{
pub fn new(bsp:vbsp::Bsp)->Self{
let case_folded_file_names=bsp.pack.clone().into_zip().lock().unwrap().file_names().map(|s|{
(s.to_lowercase(),s.to_owned())
}).collect();
Self{
bsp,
case_folded_file_names,
}
}
pub fn pack_get(&self,name_lowercase:&str)->Result<Option<Vec<u8>>,vbsp::BspError>{
match self.case_folded_file_names.get(name_lowercase){
Some(name_folded)=>self.bsp.pack.get(name_folded),
None=>Ok(None),
}
}
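// A std-only sketch of the case-folding trick used by Bsp::new/pack_get
// above: build a lowercase -> original-name index once, then resolve queries
// that arrive already lowercased. Reading the packed file itself is omitted;
// CaseFoldedIndex is an illustrative name, not a type from this crate.
struct CaseFoldedIndex{
	names:std::collections::HashMap<String,String>,
}
impl CaseFoldedIndex{
	fn new<'a>(file_names:impl IntoIterator<Item=&'a str>)->Self{
		Self{
			names:file_names.into_iter().map(|s|(s.to_lowercase(),s.to_owned())).collect(),
		}
	}
	fn resolve(&self,name_lowercase:&str)->Option<&str>{
		self.names.get(name_lowercase).map(String::as_str)
	}
}

#[test]
fn resolves_mixed_case_names(){
	let index=CaseFoldedIndex::new(["materials/Concrete/ConcreteFloor01.vtf"]);
	assert_eq!(
		index.resolve("materials/concrete/concretefloor01.vtf"),
		Some("materials/Concrete/ConcreteFloor01.vtf"),
	);
}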
+pub fn convert<AcquireRenderConfigId,AcquireMeshId>(
+	bsp:&Bsp,
+	acquire_render_config_id:AcquireRenderConfigId,
+	acquire_mesh_id:AcquireMeshId
+)->bsp::PartialMap1
+where
+	AcquireRenderConfigId:FnMut(Option<&str>)->strafesnet_common::model::RenderConfigId,
+	AcquireMeshId:FnMut(&str)->strafesnet_common::model::MeshId,
+{
+	bsp::convert_bsp(bsp.as_ref(),acquire_render_config_id,acquire_mesh_id)
-	pub fn to_snf(&self,failure_mode:LoadFailureMode,vpk_list:&[Vpk])->Result<strafesnet_common::map::CompleteMap,LoadError>{
-		let mut texture_deferred_loader=RenderConfigDeferredLoader::new();
-		let mut mesh_deferred_loader=MeshDeferredLoader::new();
-		let map_step1=bsp::convert(
-			self,
-			&mut texture_deferred_loader,
-			&mut mesh_deferred_loader,
-		);
-		let mut mesh_loader=loader::MeshLoader::new(loader::BspFinder{bsp:self,vpks:vpk_list},&mut texture_deferred_loader);
-		let prop_meshes=mesh_deferred_loader.into_meshes(&mut mesh_loader,failure_mode).map_err(LoadError::Mesh)?;
-		let map_step2=map_step1.add_prop_meshes(prop_meshes);
-		let mut texture_loader=loader::TextureLoader::new();
-		let render_configs=texture_deferred_loader.into_render_configs(&mut texture_loader,failure_mode).map_err(LoadError::Texture)?;
-		let map=map_step2.add_render_configs_and_textures(render_configs);
-		Ok(map)
-	}
-}
pub struct Vpk{
vpk:vpk::VPK,
case_folded_file_names:std::collections::HashMap<String,String>,
}
impl AsRef<vpk::VPK> for Vpk{
fn as_ref(&self)->&vpk::VPK{
&self.vpk
}
}
impl Vpk{
pub fn new(vpk:vpk::VPK)->Vpk{
let case_folded_file_names=vpk.tree.keys().map(|s|{
(s.to_lowercase(),s.to_owned())
}).collect();
Vpk{
vpk,
case_folded_file_names,
}
}
pub fn tree_get(&self,name_lowercase:&str)->Option<&vpk::entry::VPKEntry>{
let name_folded=self.case_folded_file_names.get(name_lowercase)?;
self.vpk.tree.get(name_folded)
}
} }

@ -1,175 +0,0 @@
use std::{borrow::Cow, io::Read};
use strafesnet_common::model::Mesh;
use strafesnet_deferred_loader::{loader::Loader,texture::Texture};
use crate::{Bsp,Vpk};
#[allow(dead_code)]
#[derive(Debug)]
pub enum TextureError{
Io(std::io::Error),
}
impl std::fmt::Display for TextureError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for TextureError{}
impl From<std::io::Error> for TextureError{
fn from(value:std::io::Error)->Self{
Self::Io(value)
}
}
pub struct TextureLoader<'a>(std::marker::PhantomData<&'a ()>);
impl TextureLoader<'_>{
pub fn new()->Self{
Self(std::marker::PhantomData)
}
}
impl<'a> Loader for TextureLoader<'a>{
type Error=TextureError;
type Index=Cow<'a,str>;
type Resource=Texture;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>{
let file_name=format!("textures/{}.dds",index);
let mut file=std::fs::File::open(file_name)?;
let mut data=Vec::new();
file.read_to_end(&mut data)?;
Ok(Texture::ImageDDS(data))
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub enum MeshError{
Io(std::io::Error),
VMDL(vmdl::ModelError),
VBSP(vbsp::BspError),
MissingMdl(String),
MissingVtx,
MissingVvd,
}
impl std::fmt::Display for MeshError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for MeshError{}
impl From<std::io::Error> for MeshError{
fn from(value:std::io::Error)->Self{
Self::Io(value)
}
}
impl From<vmdl::ModelError> for MeshError{
fn from(value:vmdl::ModelError)->Self{
Self::VMDL(value)
}
}
impl From<vbsp::BspError> for MeshError{
fn from(value:vbsp::BspError)->Self{
Self::VBSP(value)
}
}
#[derive(Clone,Copy)]
pub struct BspFinder<'bsp,'vpk>{
pub bsp:&'bsp Bsp,
pub vpks:&'vpk [Vpk],
}
impl<'bsp,'vpk> BspFinder<'bsp,'vpk>{
pub fn find<'a>(&self,path:&str)->Result<Option<Cow<'a,[u8]>>,vbsp::BspError>
where
'bsp:'a,
'vpk:'a,
{
// search bsp
if let Some(data)=self.bsp.pack_get(path)?{
return Ok(Some(Cow::Owned(data)));
}
//search each vpk
for vpk in self.vpks{
if let Some(vpk_entry)=vpk.tree_get(path){
return Ok(Some(vpk_entry.get()?));
}
}
Ok(None)
}
}
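// A simplified sketch of the layered lookup in BspFinder::find above: check
// the map's own pack first, then fall through each mounted archive in order.
// The sources are stubbed as HashMaps; the real code queries the BSP pack
// and vpk trees and returns Cow<[u8]> with proper error handling.
use std::collections::HashMap;

fn find_layered<'a>(
	pack:&'a HashMap<String,Vec<u8>>,
	archives:&'a [HashMap<String,Vec<u8>>],
	path:&str,
)->Option<&'a [u8]>{
	if let Some(data)=pack.get(path){
		return Some(data.as_slice());
	}
	for archive in archives{
		if let Some(data)=archive.get(path){
			return Some(data.as_slice());
		}
	}
	None
}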
pub struct ModelLoader<'bsp,'vpk,'a>{
finder:BspFinder<'bsp,'vpk>,
life:core::marker::PhantomData<&'a ()>,
}
impl ModelLoader<'_,'_,'_>{
#[inline]
pub const fn new<'bsp,'vpk,'a>(
finder:BspFinder<'bsp,'vpk>,
)->ModelLoader<'bsp,'vpk,'a>{
ModelLoader{
finder,
life:core::marker::PhantomData,
}
}
}
impl<'bsp,'vpk,'a> Loader for ModelLoader<'bsp,'vpk,'a>
where
'bsp:'a,
'vpk:'a,
{
type Error=MeshError;
type Index=&'a str;
type Resource=vmdl::Model;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>{
let mdl_path_lower=index.to_lowercase();
//.mdl, .vvd, .dx90.vtx
let path=std::path::PathBuf::from(mdl_path_lower.as_str());
let mut vvd_path=path.clone();
let mut vtx_path=path;
vvd_path.set_extension("vvd");
vtx_path.set_extension("dx90.vtx");
// TODO: search more packs, possibly using an index of multiple packs
let mdl=self.finder.find(mdl_path_lower.as_str())?.ok_or(MeshError::MissingMdl(mdl_path_lower))?;
let vtx=self.finder.find(vtx_path.as_os_str().to_str().unwrap())?.ok_or(MeshError::MissingVtx)?;
let vvd=self.finder.find(vvd_path.as_os_str().to_str().unwrap())?.ok_or(MeshError::MissingVvd)?;
Ok(vmdl::Model::from_parts(
vmdl::mdl::Mdl::read(mdl.as_ref())?,
vmdl::vtx::Vtx::read(vtx.as_ref())?,
vmdl::vvd::Vvd::read(vvd.as_ref())?,
))
}
}
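// A sketch of the companion-file path derivation used by ModelLoader::load
// above: starting from "x.mdl", look for "x.vvd" and "x.dx90.vtx". Note that
// PathBuf::set_extension replaces only the final extension, which is why the
// ".dx90" part has to be included in the replacement string.
fn companion_paths(mdl_path:&str)->(std::path::PathBuf,std::path::PathBuf){
	let path=std::path::PathBuf::from(mdl_path);
	let mut vvd_path=path.clone();
	let mut vtx_path=path;
	vvd_path.set_extension("vvd");
	vtx_path.set_extension("dx90.vtx");
	(vvd_path,vtx_path)
}

#[test]
fn derives_vvd_and_vtx_paths(){
	let (vvd,vtx)=companion_paths("models/props/barrel.mdl");
	assert_eq!(vvd,std::path::Path::new("models/props/barrel.vvd"));
	assert_eq!(vtx,std::path::Path::new("models/props/barrel.dx90.vtx"));
}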
pub struct MeshLoader<'bsp,'vpk,'load,'a>{
finder:BspFinder<'bsp,'vpk>,
deferred_loader:&'load mut strafesnet_deferred_loader::deferred_loader::RenderConfigDeferredLoader<Cow<'a,str>>,
}
impl MeshLoader<'_,'_,'_,'_>{
#[inline]
pub const fn new<'bsp,'vpk,'load,'a>(
finder:BspFinder<'bsp,'vpk>,
deferred_loader:&'load mut strafesnet_deferred_loader::deferred_loader::RenderConfigDeferredLoader<Cow<'a,str>>,
)->MeshLoader<'bsp,'vpk,'load,'a>{
MeshLoader{
finder,
deferred_loader
}
}
}
impl<'bsp,'vpk,'load,'a> Loader for MeshLoader<'bsp,'vpk,'load,'a>
where
'bsp:'a,
'vpk:'a,
{
type Error=MeshError;
type Index=&'a str;
type Resource=Mesh;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>{
let model=ModelLoader::new(self.finder).load(index)?;
let mesh=crate::mesh::convert_mesh(model,&mut self.deferred_loader);
Ok(mesh)
}
}

@ -1,78 +0,0 @@
use std::borrow::Cow;
use strafesnet_common::model;
use strafesnet_deferred_loader::deferred_loader::RenderConfigDeferredLoader;
use crate::valve_transform;
fn ingest_vertex(mb:&mut model::MeshBuilder,vertex:&vmdl::vvd::Vertex,color:model::ColorId)->model::VertexId{
let pos=mb.acquire_pos_id(valve_transform(vertex.position.into()));
let normal=mb.acquire_normal_id(valve_transform(vertex.normal.into()));
let tex=mb.acquire_tex_id(glam::Vec2::from_array(vertex.texture_coordinates));
mb.acquire_vertex_id(model::IndexedVertex{
pos,
tex,
normal,
color,
})
}
pub fn convert_mesh(model:vmdl::Model,deferred_loader:&mut RenderConfigDeferredLoader<Cow<str>>)->model::Mesh{
let texture_paths=model.texture_directories();
if texture_paths.len()!=1{
println!("WARNING: multiple texture paths");
}
let skin=model.skin_tables().nth(0).unwrap();
let mut mb=model::MeshBuilder::new();
let color=mb.acquire_color_id(glam::Vec4::ONE);
let model_vertices=model.vertices();
let mut graphics_groups=Vec::new();
let mut physics_groups=Vec::new();
let polygon_groups=model.meshes().enumerate().map(|(polygon_group_id,mesh)|{
let polygon_group_id=model::PolygonGroupId::new(polygon_group_id as u32);
let render_id=if let (Some(texture_path),Some(texture_name))=(texture_paths.get(0),skin.texture(mesh.material_index())){
let mut path=std::path::PathBuf::from(texture_path.as_str());
path.push(texture_name);
let index=path.as_os_str().to_str().map(|s|Cow::Owned(s.to_owned()));
deferred_loader.acquire_render_config_id(index)
}else{
deferred_loader.acquire_render_config_id(None)
};
graphics_groups.push(model::IndexedGraphicsGroup{
render:render_id,
groups:vec![polygon_group_id],
});
physics_groups.push(model::IndexedPhysicsGroup{
groups:vec![polygon_group_id],
});
model::PolygonGroup::PolygonList(model::PolygonList::new(
//looking at the code, it would seem that the strips are pre-deindexed into triangle lists when calling this function
mesh.vertex_strip_indices().flat_map(|mut strip|{
std::iter::from_fn(move ||{
match (strip.next(),strip.next(),strip.next()){
(Some(v1),Some(v2),Some(v3))=>Some([v1,v2,v3]),
//ignore extra vertices, not sure what to do in this case, failing the whole conversion could be appropriate
_=>None,
}
})
}).flat_map(|[v1,v2,v3]|{
// this should probably be a fatal error :D
let v1=model_vertices.get(v1)?;
let v2=model_vertices.get(v2)?;
let v3=model_vertices.get(v3)?;
Some(vec![
ingest_vertex(&mut mb,v1,color),
ingest_vertex(&mut mb,v2,color),
ingest_vertex(&mut mb,v3,color),
])
}).collect()
))
}).collect();
mb.build(polygon_groups,graphics_groups,physics_groups)
}

@@ -1,7 +1,7 @@
 [package]
 name = "strafesnet_common"
-version = "0.6.0"
-edition = "2024"
+version = "0.5.2"
+edition = "2021"
 repository = "https://git.itzana.me/StrafesNET/strafe-project"
 license = "MIT OR Apache-2.0"
 description = "Common types and helpers for Strafe Client associated projects."
@@ -12,8 +12,8 @@ authors = ["Rhys Lloyd <krakow20@gmail.com>"]
 [dependencies]
 arrayvec = "0.7.4"
 bitflags = "2.6.0"
-fixed_wide = { version = "0.2.0", path = "../fixed_wide", registry = "strafesnet", features = ["deferred-division","zeroes","wide-mul"] }
-linear_ops = { version = "0.1.1", path = "../linear_ops", registry = "strafesnet", features = ["deferred-division","named-fields"] }
-ratio_ops = { version = "0.1.0", path = "../ratio_ops", registry = "strafesnet" }
-glam = "0.30.0"
+fixed_wide = { path = "../fixed_wide", registry = "strafesnet", features = ["deferred-division","zeroes","wide-mul"] }
+linear_ops = { path = "../linear_ops", registry = "strafesnet", features = ["deferred-division","named-fields"] }
+ratio_ops = { path = "../ratio_ops", registry = "strafesnet" }
+glam = "0.29.0"
 id = { version = "0.1.0", registry = "strafesnet" }

@@ -7,57 +7,42 @@ pub struct Aabb{
 }
 impl Default for Aabb{
-	#[inline]
 	fn default()->Self{
 		Self{min:vec3::MAX,max:vec3::MIN}
 	}
 }
 impl Aabb{
-	#[inline]
 	pub const fn new(min:Planar64Vec3,max:Planar64Vec3)->Self{
 		Self{min,max}
 	}
-	#[inline]
 	pub const fn max(&self)->Planar64Vec3{
 		self.max
 	}
-	#[inline]
 	pub const fn min(&self)->Planar64Vec3{
 		self.min
 	}
-	#[inline]
 	pub fn grow(&mut self,point:Planar64Vec3){
 		self.min=self.min.min(point);
 		self.max=self.max.max(point);
 	}
-	#[inline]
 	pub fn join(&mut self,aabb:&Aabb){
 		self.min=self.min.min(aabb.min);
 		self.max=self.max.max(aabb.max);
 	}
-	#[inline]
 	pub fn inflate(&mut self,hs:Planar64Vec3){
 		self.min-=hs;
 		self.max+=hs;
 	}
-	#[inline]
-	pub fn contains(&self,point:Planar64Vec3)->bool{
-		let bvec=self.min.lt(point)&point.lt(self.max);
-		bvec.all()
-	}
-	#[inline]
 	pub fn intersects(&self,aabb:&Aabb)->bool{
 		let bvec=self.min.lt(aabb.max)&aabb.min.lt(self.max);
 		bvec.all()
 	}
-	#[inline]
 	pub fn size(&self)->Planar64Vec3{
 		self.max-self.min
 	}
-	#[inline]
 	pub fn center(&self)->Planar64Vec3{
-		self.min.map_zip(self.max,|(min,max)|min.midpoint(max))
+		self.min+(self.max-self.min)>>1
 	}
 	//probably use floats for area & volume because we don't care about precision
 	// pub fn area_weight(&self)->f32{
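// A plain-i64 sketch of the two center() formulations above. The midpoint
// form on the left and the shift-based form on the right both presumably
// sidestep the naive (min+max)/2, which can overflow on fixed-point
// coordinates; Planar64 itself is not modelled here.
fn center_naive(min:i64,max:i64)->i64{
	(min+max)/2 // overflows for coordinates near the ends of the range
}
fn center_shift(min:i64,max:i64)->i64{
	min+((max-min)>>1) // stays in range whenever max-min does
}

#[test]
fn shift_center_avoids_the_overflow(){
	let (min,max)=(i64::MAX-4,i64::MAX);
	assert_eq!(center_shift(min,max),i64::MAX-2);
	// center_naive(min,max) would panic on overflow in a debug build
	let _=center_naive;
}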

@ -1,10 +1,4 @@
use std::cmp::Ordering;
use std::collections::BTreeMap;
use crate::aabb::Aabb; use crate::aabb::Aabb;
use crate::ray::Ray;
use crate::integer::{Ratio,Planar64};
use crate::instruction::{InstructionCollector,TimedInstruction};
//da algaritum //da algaritum
//lista boxens //lista boxens
@ -16,117 +10,35 @@ use crate::instruction::{InstructionCollector,TimedInstruction};
//sort the centerpoints on each axis (3 lists) //sort the centerpoints on each axis (3 lists)
//bv is put into octant based on whether it is upper or lower in each list //bv is put into octant based on whether it is upper or lower in each list
pub enum RecursiveContent<R,T>{
pub fn intersect_aabb(ray:&Ray,aabb:&Aabb)->Option<Ratio<Planar64,Planar64>>{ Branch(Vec<R>),
// n.(o+d*t)==n.p Leaf(T),
// n.o + n.d * t == n.p
// t == (n.p - n.o)/n.d
let mut hit=None;
match ray.direction.x.cmp(&Planar64::ZERO){
Ordering::Less=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dy=rel_max.x*ray.direction.y;
let dz=rel_max.x*ray.direction.z;
// x is negative, so inequalities are flipped
if rel_min.y*ray.direction.x>dy&&dy>rel_max.y*ray.direction.x
&&rel_min.z*ray.direction.x>dz&&dz>rel_max.z*ray.direction.x{
let t=rel_max.x/ray.direction.x;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
Ordering::Equal=>(),
Ordering::Greater=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dy=rel_min.x*ray.direction.y;
let dz=rel_min.x*ray.direction.z;
// x is positive, so inequalities are normal
if rel_min.y*ray.direction.x<dy&&dy<rel_max.y*ray.direction.x
&&rel_min.z*ray.direction.x<dz&&dz<rel_max.z*ray.direction.x{
let t=rel_min.x/ray.direction.x;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
}
match ray.direction.z.cmp(&Planar64::ZERO){
Ordering::Less=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dx=rel_max.z*ray.direction.x;
let dy=rel_max.z*ray.direction.y;
// z is negative, so inequalities are flipped
if rel_min.x*ray.direction.z>dx&&dx>rel_max.x*ray.direction.z
&&rel_min.y*ray.direction.z>dy&&dy>rel_max.y*ray.direction.z{
let t=rel_max.z/ray.direction.z;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
Ordering::Equal=>(),
Ordering::Greater=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dx=rel_min.z*ray.direction.x;
let dy=rel_min.z*ray.direction.y;
// z is positive, so inequalities are normal
if rel_min.x*ray.direction.z<dx&&dx<rel_max.x*ray.direction.z
&&rel_min.y*ray.direction.z<dy&&dy<rel_max.y*ray.direction.z{
let t=rel_min.z/ray.direction.z;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
}
match ray.direction.y.cmp(&Planar64::ZERO){
Ordering::Less=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dz=rel_max.y*ray.direction.z;
let dx=rel_max.y*ray.direction.x;
// y is negative, so inequalities are flipped
if rel_min.z*ray.direction.y>dz&&dz>rel_max.z*ray.direction.y
&&rel_min.x*ray.direction.y>dx&&dx>rel_max.x*ray.direction.y{
let t=rel_max.y/ray.direction.y;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
Ordering::Equal=>(),
Ordering::Greater=>{
let rel_min=aabb.min()-ray.origin;
let rel_max=aabb.max()-ray.origin;
let dz=rel_min.y*ray.direction.z;
let dx=rel_min.y*ray.direction.x;
// y is positive, so inequalities are normal
if rel_min.z*ray.direction.y<dz&&dz<rel_max.z*ray.direction.y
&&rel_min.x*ray.direction.y<dx&&dx<rel_max.x*ray.direction.y{
let t=rel_min.y/ray.direction.y;
hit=Some(hit.map_or(t,|best_t|t.min(best_t)));
}
},
}
hit
} }
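// For comparison, a float version of the classic slab test that
// intersect_aabb above performs in fixed point (the fixed-point code
// cross-multiplies instead of dividing so that a division only happens for a
// candidate hit axis). Returns the entry time t along the ray, if any; a
// negative t means the box starts behind or around the ray origin.
fn slab_intersect(origin:[f32;3],dir:[f32;3],min:[f32;3],max:[f32;3])->Option<f32>{
	let mut t_enter=f32::NEG_INFINITY;
	let mut t_exit=f32::INFINITY;
	for axis in 0..3{
		if dir[axis]==0.0{
			// parallel to this slab: the origin must already be inside it
			if origin[axis]<min[axis]||max[axis]<origin[axis]{
				return None;
			}
			continue;
		}
		let t0=(min[axis]-origin[axis])/dir[axis];
		let t1=(max[axis]-origin[axis])/dir[axis];
		let (near,far)=if t0<t1{(t0,t1)}else{(t1,t0)};
		t_enter=t_enter.max(near);
		t_exit=t_exit.min(far);
		if t_exit<t_enter{
			return None;
		}
	}
	Some(t_enter)
}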
impl<R,T> Default for RecursiveContent<R,T>{
pub enum RecursiveContent<N,L>{ fn default()->Self{
Branch(Vec<N>),
Leaf(L),
}
impl<N,L> RecursiveContent<N,L>{
pub fn empty()->Self{
Self::Branch(Vec::new()) Self::Branch(Vec::new())
} }
} }
pub struct BvhNode<L>{ pub struct BvhNode<T>{
content:RecursiveContent<BvhNode<L>,L>, content:RecursiveContent<BvhNode<T>,T>,
aabb:Aabb, aabb:Aabb,
} }
impl<L> BvhNode<L>{ impl<T> Default for BvhNode<T>{
pub fn empty()->Self{ fn default()->Self{
Self{ Self{
content:RecursiveContent::empty(), content:Default::default(),
aabb:Aabb::default(), aabb:Aabb::default(),
} }
} }
pub fn sample_aabb<F:FnMut(&L)>(&self,aabb:&Aabb,f:&mut F){ }
pub struct BvhWeightNode<W,T>{
content:RecursiveContent<BvhWeightNode<W,T>,T>,
weight:W,
aabb:Aabb,
}
impl<T> BvhNode<T>{
pub fn the_tester<F:FnMut(&T)>(&self,aabb:&Aabb,f:&mut F){
match &self.content{ match &self.content{
RecursiveContent::Leaf(model)=>f(model), RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{ RecursiveContent::Branch(children)=>for child in children{
@ -135,101 +47,51 @@ impl<L> BvhNode<L>{
//you're probably not going to spend a lot of time outside the map, //you're probably not going to spend a lot of time outside the map,
//so the test is extra work for nothing //so the test is extra work for nothing
if aabb.intersects(&child.aabb){ if aabb.intersects(&child.aabb){
child.sample_aabb(aabb,f); child.the_tester(aabb,f);
} }
}, },
} }
} }
fn populate_nodes<'a,T,F>( pub fn into_visitor<F:FnMut(T)>(self,f:&mut F){
&'a self, match self.content{
collector:&mut InstructionCollector<&'a L,Ratio<Planar64,Planar64>>, RecursiveContent::Leaf(model)=>f(model),
nodes:&mut BTreeMap<Ratio<Planar64,Planar64>,&'a BvhNode<L>>,
ray:&Ray,
start_time:Ratio<Planar64,Planar64>,
f:&F,
)
where
T:Ord+Copy,
Ratio<Planar64,Planar64>:From<T>,
F:Fn(&L,&Ray)->Option<T>,
{
match &self.content{
RecursiveContent::Leaf(leaf)=>if let Some(time)=f(leaf,ray){
let ins=TimedInstruction{time:time.into(),instruction:leaf};
if start_time.lt_ratio(ins.time)&&ins.time.lt_ratio(collector.time()){
collector.collect(Some(ins));
}
},
RecursiveContent::Branch(children)=>for child in children{ RecursiveContent::Branch(children)=>for child in children{
if child.aabb.contains(ray.origin){ child.into_visitor(f)
child.populate_nodes(collector,nodes,ray,start_time,f); },
}else{
// Am I an upcoming superstar?
if let Some(t)=intersect_aabb(ray,&child.aabb){
if start_time.lt_ratio(t)&&t.lt_ratio(collector.time()){
nodes.insert(t,child);
} }
} }
pub fn weigh_contents<W:Copy+std::iter::Sum<W>,F:Fn(&T)->W>(self,f:&F)->BvhWeightNode<W,T>{
match self.content{
RecursiveContent::Leaf(model)=>BvhWeightNode{
weight:f(&model),
content:RecursiveContent::Leaf(model),
aabb:self.aabb,
},
RecursiveContent::Branch(children)=>{
let branch:Vec<BvhWeightNode<W,T>>=children.into_iter().map(|child|
child.weigh_contents(f)
).collect();
BvhWeightNode{
weight:branch.iter().map(|node|node.weight).sum(),
content:RecursiveContent::Branch(branch),
aabb:self.aabb,
} }
}, },
} }
} }
pub fn sample_ray<T,F>( }
&self,
ray:&Ray,
start_time:T,
time_limit:T,
f:F,
)->Option<(T,&L)>
where
T:Ord+Copy,
T:From<Ratio<Planar64,Planar64>>,
Ratio<Planar64,Planar64>:From<T>,
F:Fn(&L,&Ray)->Option<T>,
{
// source of nondeterminism when Aabb boundaries are coplanar
let mut nodes=BTreeMap::new();
let start_time=start_time.into(); impl <W,T> BvhWeightNode<W,T>{
let time_limit=time_limit.into(); pub const fn weight(&self)->&W{
let mut collector=InstructionCollector::new(time_limit); &self.weight
// break open all nodes that contain ray.origin and populate nodes with future intersection times
self.populate_nodes(&mut collector,&mut nodes,ray,start_time,&f);
// swim through nodes one at a time
while let Some((t,node))=nodes.pop_first(){
if collector.time()<t{
break;
} }
match &node.content{ pub const fn aabb(&self)->&Aabb{
RecursiveContent::Leaf(leaf)=>if let Some(time)=f(leaf,ray){ &self.aabb
let ins=TimedInstruction{time:time.into(),instruction:leaf};
// this lower bound can also be omitted
// but it causes type inference errors lol
if start_time.lt_ratio(ins.time)&&ins.time.lt_ratio(collector.time()){
collector.collect(Some(ins));
} }
}, pub fn into_content(self)->RecursiveContent<BvhWeightNode<W,T>,T>{
// break open the node and predict collisions with the child nodes self.content
RecursiveContent::Branch(children)=>for child in children{
// Am I an upcoming superstar?
if let Some(t)=intersect_aabb(ray,&child.aabb){
// we don't need to check the lower bound
// because child aabbs are guaranteed to be within the parent bounds.
if t<collector.time(){
nodes.insert(t,child);
} }
} pub fn into_visitor<F:FnMut(T)>(self,f:&mut F){
},
}
}
collector.take().map(|TimedInstruction{time,instruction:leaf}|(time.into(),leaf))
}
pub fn into_inner(self)->(RecursiveContent<BvhNode<L>,L>,Aabb){
(self.content,self.aabb)
}
pub fn into_visitor<F:FnMut(L)>(self,f:&mut F){
match self.content{ match self.content{
RecursiveContent::Leaf(model)=>f(model), RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{ RecursiveContent::Branch(children)=>for child in children{
@@ -268,9 +130,9 @@ fn generate_bvh_node<T>(boxen:Vec<(T,Aabb)>,force:bool)->BvhNode<T>{
 		sort_y.push((i,center.y));
 		sort_z.push((i,center.z));
 	}
-	sort_x.sort_by_key(|&(_,c)|c);
-	sort_y.sort_by_key(|&(_,c)|c);
-	sort_z.sort_by_key(|&(_,c)|c);
+	sort_x.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
+	sort_y.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
+	sort_z.sort_by(|tup0,tup1|tup0.1.cmp(&tup1.1));
 	let h=n/2;
 	let median_x=sort_x[h].1;
 	let median_y=sort_y[h].1;
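// A sketch of the median-split step above: sort item indices by centroid on
// one axis and cut at the halfway point. The real builder sorts all three
// axes and buckets each box into an octant by whether it lands in the lower
// or upper half of each list; only the single-axis idea is shown here.
fn median_split(mut items:Vec<(usize,i64)>)->(Vec<usize>,Vec<usize>){
	items.sort_by_key(|&(_,centroid)|centroid);
	let h=items.len()/2;
	let upper=items.split_off(h);
	(
		items.into_iter().map(|(i,_)|i).collect(),
		upper.into_iter().map(|(i,_)|i).collect(),
	)
}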

@@ -171,7 +171,4 @@ impl CollisionAttributes{
 	pub fn contact_default()->Self{
 		Self::Contact(ContactAttributes::default())
 	}
-	pub fn intersect_default()->Self{
-		Self::Intersect(IntersectAttributes::default())
-	}
 }

@@ -1,6 +1,7 @@
 use std::collections::{HashSet,HashMap};
 use crate::model::ModelId;
 use crate::gameplay_style;
+use crate::updatable::Updatable;
 #[derive(Clone)]
 pub struct StageElement{
@@ -127,6 +128,18 @@ impl Stage{
 		self.unordered_checkpoints.contains(&model_id)
 	}
 }
#[derive(Default)]
pub struct StageUpdate{
//other behaviour models of this stage can have
ordered_checkpoints:HashMap<CheckpointId,ModelId>,
unordered_checkpoints:HashSet<ModelId>,
}
impl Updatable<StageUpdate> for Stage{
fn update(&mut self,update:StageUpdate){
self.ordered_checkpoints.extend(update.ordered_checkpoints);
self.unordered_checkpoints.extend(update.unordered_checkpoints);
}
}
 #[derive(Clone,Copy,Hash,Eq,PartialEq)]
 pub enum Zone{
@@ -198,47 +211,34 @@ impl Mode{
 	pub fn push_stage(&mut self,stage:Stage){
 		self.stages.push(stage)
 	}
-	pub fn get_stage_mut(&mut self,StageId(stage_id):StageId)->Option<&mut Stage>{
-		self.stages.get_mut(stage_id as usize)
+	pub fn get_stage_mut(&mut self,stage:StageId)->Option<&mut Stage>{
+		self.stages.get_mut(stage.0 as usize)
 	}
-	pub fn get_spawn_model_id(&self,StageId(stage_id):StageId)->Option<ModelId>{
-		self.stages.get(stage_id as usize).map(|s|s.spawn)
+	pub fn get_spawn_model_id(&self,stage:StageId)->Option<ModelId>{
+		self.stages.get(stage.0 as usize).map(|s|s.spawn)
 	}
 	pub fn get_zone(&self,model_id:ModelId)->Option<&Zone>{
 		self.zones.get(&model_id)
 	}
-	pub fn get_stage(&self,StageId(stage_id):StageId)->Option<&Stage>{
-		self.stages.get(stage_id as usize)
+	pub fn get_stage(&self,stage_id:StageId)->Option<&Stage>{
+		self.stages.get(stage_id.0 as usize)
 	}
 	pub fn get_element(&self,model_id:ModelId)->Option<&StageElement>{
 		self.elements.get(&model_id)
 	}
-}
#[derive(Clone)]
pub struct NormalizedMode(Mode);
impl NormalizedMode{
pub fn new(mode:Mode)->Self{
Self(mode)
}
pub fn into_inner(self)->Mode{
let Self(mode)=self;
mode
}
//TODO: put this in the SNF //TODO: put this in the SNF
pub fn denormalize(self)->Mode{ pub fn denormalize_data(&mut self){
let NormalizedMode(mut mode)=self;
//expand and index normalized data //expand and index normalized data
mode.zones.insert(mode.start,Zone::Start); self.zones.insert(self.start,Zone::Start);
for (stage_id,stage) in mode.stages.iter().enumerate(){ for (stage_id,stage) in self.stages.iter().enumerate(){
mode.elements.insert(stage.spawn,StageElement{ self.elements.insert(stage.spawn,StageElement{
stage_id:StageId(stage_id as u32), stage_id:StageId(stage_id as u32),
force:false, force:false,
behaviour:StageElementBehaviour::SpawnAt, behaviour:StageElementBehaviour::SpawnAt,
jump_limit:None, jump_limit:None,
}); });
for (_,&model) in &stage.ordered_checkpoints{ for (_,&model) in &stage.ordered_checkpoints{
mode.elements.insert(model,StageElement{ self.elements.insert(model,StageElement{
stage_id:StageId(stage_id as u32), stage_id:StageId(stage_id as u32),
force:false, force:false,
behaviour:StageElementBehaviour::Checkpoint, behaviour:StageElementBehaviour::Checkpoint,
@ -246,7 +246,7 @@ impl NormalizedMode{
}); });
} }
for &model in &stage.unordered_checkpoints{ for &model in &stage.unordered_checkpoints{
mode.elements.insert(model,StageElement{ self.elements.insert(model,StageElement{
stage_id:StageId(stage_id as u32), stage_id:StageId(stage_id as u32),
force:false, force:false,
behaviour:StageElementBehaviour::Checkpoint, behaviour:StageElementBehaviour::Checkpoint,
@ -254,13 +254,53 @@ impl NormalizedMode{
}); });
} }
} }
mode }
}
//this would be nice as a macro
#[derive(Default)]
pub struct ModeUpdate{
zones:HashMap<ModelId,Zone>,
stages:HashMap<StageId,StageUpdate>,
//mutually exclusive stage element behaviour
elements:HashMap<ModelId,StageElement>,
}
impl Updatable<ModeUpdate> for Mode{
fn update(&mut self,update:ModeUpdate){
self.zones.extend(update.zones);
for (stage,stage_update) in update.stages{
if let Some(stage)=self.stages.get_mut(stage.0 as usize){
stage.update(stage_update);
}
}
self.elements.extend(update.elements);
}
}
impl ModeUpdate{
pub fn zone(model_id:ModelId,zone:Zone)->Self{
let mut mu=Self::default();
mu.zones.insert(model_id,zone);
mu
}
pub fn stage(stage_id:StageId,stage_update:StageUpdate)->Self{
let mut mu=Self::default();
mu.stages.insert(stage_id,stage_update);
mu
}
pub fn element(model_id:ModelId,element:StageElement)->Self{
let mut mu=Self::default();
mu.elements.insert(model_id,element);
mu
}
pub fn map_stage_element_ids<F:Fn(StageId)->StageId>(&mut self,f:F){
for (_,stage_element) in self.elements.iter_mut(){
stage_element.stage_id=f(stage_element.stage_id);
}
} }
} }
#[derive(Default,Clone)] #[derive(Default,Clone)]
pub struct Modes{ pub struct Modes{
modes:Vec<Mode>, pub modes:Vec<Mode>,
} }
impl Modes{ impl Modes{
pub const fn new(modes:Vec<Mode>)->Self{ pub const fn new(modes:Vec<Mode>)->Self{
@ -274,182 +314,19 @@ impl Modes{
pub fn push_mode(&mut self,mode:Mode){ pub fn push_mode(&mut self,mode:Mode){
self.modes.push(mode) self.modes.push(mode)
} }
pub fn get_mode(&self,ModeId(mode_id):ModeId)->Option<&Mode>{ pub fn get_mode(&self,mode:ModeId)->Option<&Mode>{
self.modes.get(mode_id as usize) self.modes.get(mode.0 as usize)
} }
} }
pub struct ModesUpdate{
#[derive(Clone)] modes:HashMap<ModeId,ModeUpdate>,
pub struct NormalizedModes{
modes:Vec<NormalizedMode>,
} }
impl NormalizedModes{ impl Updatable<ModesUpdate> for Modes{
pub fn new(modes:Vec<NormalizedMode>)->Self{ fn update(&mut self,update:ModesUpdate){
Self{modes} for (mode,mode_update) in update.modes{
if let Some(mode)=self.modes.get_mut(mode.0 as usize){
mode.update(mode_update);
} }
pub fn len(&self)->usize{
self.modes.len()
}
pub fn denormalize(self)->Modes{
Modes{
modes:self.modes.into_iter().map(NormalizedMode::denormalize).collect(),
} }
} }
} }
impl IntoIterator for NormalizedModes{
type Item=<Vec<NormalizedMode> as IntoIterator>::Item;
type IntoIter=<Vec<NormalizedMode> as IntoIterator>::IntoIter;
fn into_iter(self)->Self::IntoIter{
self.modes.into_iter()
}
}
#[derive(Default)]
pub struct StageUpdate{
//other behaviour models of this stage can have
ordered_checkpoints:HashMap<CheckpointId,ModelId>,
unordered_checkpoints:HashSet<ModelId>,
}
impl StageUpdate{
fn apply_to(self,stage:&mut Stage){
stage.ordered_checkpoints.extend(self.ordered_checkpoints);
stage.unordered_checkpoints.extend(self.unordered_checkpoints);
}
}
//this would be nice as a macro
#[derive(Default)]
pub struct ModeUpdate{
zones:Option<(ModelId,Zone)>,
stages:Option<(StageId,StageUpdate)>,
//mutually exclusive stage element behaviour
elements:Option<(ModelId,StageElement)>,
}
impl ModeUpdate{
fn apply_to(self,mode:&mut Mode){
mode.zones.extend(self.zones);
if let Some((StageId(stage_id),stage_update))=self.stages{
if let Some(stage)=mode.stages.get_mut(stage_id as usize){
stage_update.apply_to(stage);
}
}
mode.elements.extend(self.elements);
}
}
impl ModeUpdate{
pub fn zone(model_id:ModelId,zone:Zone)->Self{
Self{
zones:Some((model_id,zone)),
stages:None,
elements:None,
}
}
pub fn stage(stage_id:StageId,stage_update:StageUpdate)->Self{
Self{
zones:None,
stages:Some((stage_id,stage_update)),
elements:None,
}
}
pub fn element(model_id:ModelId,element:StageElement)->Self{
Self{
zones:None,
stages:None,
elements:Some((model_id,element)),
}
}
pub fn map_stage_element_ids<F:Fn(StageId)->StageId>(&mut self,f:F){
for (_,stage_element) in self.elements.iter_mut(){
stage_element.stage_id=f(stage_element.stage_id);
}
}
}
struct ModeBuilder{
mode:Mode,
stage_id_map:HashMap<StageId,StageId>,
}
#[derive(Default)]
pub struct ModesBuilder{
modes:HashMap<ModeId,Mode>,
stages:HashMap<ModeId,HashMap<StageId,Stage>>,
mode_updates:Vec<(ModeId,ModeUpdate)>,
stage_updates:Vec<(ModeId,StageId,StageUpdate)>,
}
impl ModesBuilder{
pub fn build_normalized(mut self)->NormalizedModes{
//collect modes and stages into contiguous arrays
let mut unique_modes:Vec<(ModeId,Mode)>
=self.modes.into_iter().collect();
unique_modes.sort_by_key(|&(mode_id,_)|mode_id);
let (mut modes,mode_id_map):(Vec<ModeBuilder>,HashMap<ModeId,ModeId>)
=unique_modes.into_iter().enumerate()
.map(|(final_mode_id,(builder_mode_id,mut mode))|{
(
ModeBuilder{
stage_id_map:self.stages.remove(&builder_mode_id).map_or_else(||HashMap::new(),|stages|{
let mut unique_stages:Vec<(StageId,Stage)>
=stages.into_iter().collect();
unique_stages.sort_by_key(|&(StageId(stage_id),_)|stage_id);
unique_stages.into_iter().enumerate()
.map(|(final_stage_id,(builder_stage_id,stage))|{
mode.push_stage(stage);
(builder_stage_id,StageId::new(final_stage_id as u32))
}).collect()
}),
mode,
},
(
builder_mode_id,
ModeId::new(final_mode_id as u32)
)
)
}).unzip();
//TODO: failure messages or errors or something
//push stage updates
for (builder_mode_id,builder_stage_id,stage_update) in self.stage_updates{
if let Some(final_mode_id)=mode_id_map.get(&builder_mode_id){
if let Some(mode)=modes.get_mut(final_mode_id.get() as usize){
if let Some(&final_stage_id)=mode.stage_id_map.get(&builder_stage_id){
if let Some(stage)=mode.mode.get_stage_mut(final_stage_id){
stage_update.apply_to(stage);
}
}
}
}
}
//push mode updates
for (builder_mode_id,mut mode_update) in self.mode_updates{
if let Some(final_mode_id)=mode_id_map.get(&builder_mode_id){
if let Some(mode)=modes.get_mut(final_mode_id.get() as usize){
//map stage id on stage elements
mode_update.map_stage_element_ids(|stage_id|
//walk down one stage id at a time until a stage is found
//TODO use better logic like BTreeMap::upper_bound instead of walking
// final_stage_id_from_builder_stage_id.upper_bound(Bound::Included(&stage_id))
// .value().copied().unwrap_or(StageId::FIRST)
(0..=stage_id.get()).rev().find_map(|builder_stage_id|
//map the stage element to that stage
mode.stage_id_map.get(&StageId::new(builder_stage_id)).copied()
).unwrap_or(StageId::FIRST)
);
mode_update.apply_to(&mut mode.mode);
}
}
}
NormalizedModes::new(modes.into_iter().map(|mode_builder|NormalizedMode(mode_builder.mode)).collect())
}
pub fn insert_mode(&mut self,mode_id:ModeId,mode:Mode){
assert!(self.modes.insert(mode_id,mode).is_none(),"Cannot replace existing mode");
}
pub fn insert_stage(&mut self,mode_id:ModeId,stage_id:StageId,stage:Stage){
assert!(self.stages.entry(mode_id).or_insert(HashMap::new()).insert(stage_id,stage).is_none(),"Cannot replace existing stage");
}
pub fn push_mode_update(&mut self,mode_id:ModeId,mode_update:ModeUpdate){
self.mode_updates.push((mode_id,mode_update));
}
// fn push_stage_update(&mut self,mode_id:ModeId,stage_id:StageId,stage_update:StageUpdate){
// self.stage_updates.push((mode_id,stage_id,stage_update));
// }
}

@ -63,22 +63,22 @@ impl JumpImpulse{
velocity:Planar64Vec3, velocity:Planar64Vec3,
jump_dir:Planar64Vec3, jump_dir:Planar64Vec3,
gravity:&Planar64Vec3, gravity:&Planar64Vec3,
_mass:Planar64, mass:Planar64,
)->Planar64Vec3{ )->Planar64Vec3{
match self{ match self{
&JumpImpulse::Time(time)=>velocity-(*gravity*time).map(|t|t.divide().clamp_1()), &JumpImpulse::Time(time)=>velocity-(*gravity*time).map(|t|t.divide().fix_1()),
&JumpImpulse::Height(height)=>{ &JumpImpulse::Height(height)=>{
//height==-v.y*v.y/(2*g.y); //height==-v.y*v.y/(2*g.y);
//use energy to determine max height //use energy to determine max height
let gg=gravity.length_squared(); let gg=gravity.length_squared();
let g=gg.sqrt(); let g=gg.sqrt().fix_1();
let v_g=gravity.dot(velocity); let v_g=gravity.dot(velocity);
//do it backwards //do it backwards
let radicand=v_g*v_g+(g*height*2).widen_4(); let radicand=v_g*v_g+(g*height*2).fix_4();
velocity-(*gravity*(radicand.sqrt().wrap_2()+v_g)/gg).divide().clamp_1() velocity-(*gravity*(radicand.sqrt().fix_2()+v_g)/gg).divide().fix_1()
}, },
&JumpImpulse::Linear(jump_speed)=>velocity+(jump_dir*jump_speed/jump_dir.length()).divide().clamp_1(), &JumpImpulse::Linear(jump_speed)=>velocity+(jump_dir*jump_speed/jump_dir.length()).divide().fix_1(),
&JumpImpulse::Energy(_energy)=>{ &JumpImpulse::Energy(energy)=>{
//calculate energy //calculate energy
//let e=gravity.dot(velocity); //let e=gravity.dot(velocity);
//add //add
@@ -91,10 +91,10 @@
 	pub fn get_jump_deltav(&self,gravity:&Planar64Vec3,mass:Planar64)->Planar64{
 		//gravity.length() is actually the proper calculation because the jump is always opposite the gravity direction
 		match self{
-			&JumpImpulse::Time(time)=>(gravity.length().wrap_1()*time/2).divide().clamp_1(),
-			&JumpImpulse::Height(height)=>(gravity.length()*height*2).sqrt().wrap_1(),
+			&JumpImpulse::Time(time)=>(gravity.length().fix_1()*time/2).divide().fix_1(),
+			&JumpImpulse::Height(height)=>(gravity.length()*height*2).sqrt().fix_1(),
 			&JumpImpulse::Linear(deltav)=>deltav,
-			&JumpImpulse::Energy(energy)=>(energy.sqrt()*2/mass.sqrt()).divide().clamp_1(),
+			&JumpImpulse::Energy(energy)=>(energy.sqrt()*2/mass.sqrt()).divide().fix_1(),
 		}
 	}
 }
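// A float sanity check of the two kinematic cases in get_jump_deltav above:
// a jump specified by total airtime t needs v0 = g*t/2, and a jump specified
// by apex height h needs v0 = sqrt(2*g*h). The fixed-point widen/fix/divide
// plumbing in the real code wraps these same formulas; the numbers in the
// test are purely illustrative and not taken from any style preset.
fn deltav_from_airtime(gravity:f64,airtime:f64)->f64{
	gravity*airtime/2.0
}
fn deltav_from_height(gravity:f64,height:f64)->f64{
	(2.0*gravity*height).sqrt()
}

#[test]
fn airtime_and_height_forms_agree(){
	let g=800.0;
	let v0=deltav_from_height(g,57.0);
	let airtime=2.0*v0/g;
	assert!((deltav_from_airtime(g,airtime)-v0).abs()<1e-9);
}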
@ -126,10 +126,10 @@ impl JumpSettings{
None=>rel_velocity, None=>rel_velocity,
}; };
let j=boost_vel.dot(jump_dir); let j=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2(); let js=jump_speed.fix_2();
if j<js{ if j<js{
//weak booster: just do a regular jump //weak booster: just do a regular jump
boost_vel+jump_dir.with_length(js-j).divide().wrap_1() boost_vel+jump_dir.with_length(js-j).divide().fix_1()
}else{ }else{
//activate booster normally, jump does nothing //activate booster normally, jump does nothing
boost_vel boost_vel
@ -142,13 +142,13 @@ impl JumpSettings{
None=>rel_velocity, None=>rel_velocity,
}; };
let j=boost_vel.dot(jump_dir); let j=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2(); let js=jump_speed.fix_2();
if j<js{ if j<js{
//speed in direction of jump cannot be lower than amount //speed in direction of jump cannot be lower than amount
boost_vel+jump_dir.with_length(js-j).divide().wrap_1() boost_vel+jump_dir.with_length(js-j).divide().fix_1()
}else{ }else{
//boost and jump add together //boost and jump add together
boost_vel+jump_dir.with_length(js).divide().wrap_1() boost_vel+jump_dir.with_length(js).divide().fix_1()
} }
} }
(false,JumpCalculation::Max)=>{ (false,JumpCalculation::Max)=>{
@ -159,10 +159,10 @@ impl JumpSettings{
None=>rel_velocity, None=>rel_velocity,
}; };
let boost_dot=boost_vel.dot(jump_dir); let boost_dot=boost_vel.dot(jump_dir);
let js=jump_speed.widen_2(); let js=jump_speed.fix_2();
if boost_dot<js{ if boost_dot<js{
//weak boost is extended to jump speed //weak boost is extended to jump speed
boost_vel+jump_dir.with_length(js-boost_dot).divide().wrap_1() boost_vel+jump_dir.with_length(js-boost_dot).divide().fix_1()
}else{ }else{
//activate booster normally, jump does nothing //activate booster normally, jump does nothing
boost_vel boost_vel
@ -174,7 +174,7 @@ impl JumpSettings{
Some(booster)=>booster.boost(rel_velocity), Some(booster)=>booster.boost(rel_velocity),
None=>rel_velocity, None=>rel_velocity,
}; };
boost_vel+jump_dir.with_length(jump_speed).divide().wrap_1() boost_vel+jump_dir.with_length(jump_speed).divide().fix_1()
}, },
} }
} }
@@ -267,9 +267,9 @@ pub struct StrafeSettings{
 impl StrafeSettings{
 	pub fn tick_velocity(&self,velocity:Planar64Vec3,control_dir:Planar64Vec3)->Option<Planar64Vec3>{
 		let d=velocity.dot(control_dir);
-		let mv=self.mv.widen_2();
+		let mv=self.mv.fix_2();
 		match d<mv{
-			true=>Some(velocity+(control_dir*self.air_accel_limit.map_or(mv-d,|limit|limit.widen_2().min(mv-d))).wrap_1()),
+			true=>Some(velocity+(control_dir*self.air_accel_limit.map_or(mv-d,|limit|limit.fix_2().min(mv-d))).fix_1()),
 			false=>None,
 		}
 	}
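// A float sketch of the strafe rule in tick_velocity above: only add speed
// along the wish direction while the current projection d is below mv, and
// cap the added amount by the optional air_accel_limit. Vectors are plain
// [f64;3] here instead of Planar64Vec3.
fn strafe_tick(velocity:[f64;3],wish_dir:[f64;3],mv:f64,air_accel_limit:Option<f64>)->Option<[f64;3]>{
	let d=velocity[0]*wish_dir[0]+velocity[1]*wish_dir[1]+velocity[2]*wish_dir[2];
	if d<mv{
		let add=air_accel_limit.map_or(mv-d,|limit|limit.min(mv-d));
		Some([
			velocity[0]+wish_dir[0]*add,
			velocity[1]+wish_dir[1]*add,
			velocity[2]+wish_dir[2]*add,
		])
	}else{
		None
	}
}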
@ -290,7 +290,7 @@ pub struct PropulsionSettings{
} }
impl PropulsionSettings{ impl PropulsionSettings{
pub fn acceleration(&self,control_dir:Planar64Vec3)->Planar64Vec3{ pub fn acceleration(&self,control_dir:Planar64Vec3)->Planar64Vec3{
(control_dir*self.magnitude).clamp_1() (control_dir*self.magnitude).fix_1()
} }
} }
@ -310,13 +310,13 @@ pub struct WalkSettings{
impl WalkSettings{ impl WalkSettings{
pub fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{ pub fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{
//TODO: fallible walk accel //TODO: fallible walk accel
let diff_len=target_diff.length().wrap_1(); let diff_len=target_diff.length().fix_1();
let friction=if diff_len<self.accelerate.topspeed{ let friction=if diff_len<self.accelerate.topspeed{
self.static_friction self.static_friction
}else{ }else{
self.kinetic_friction self.kinetic_friction
}; };
self.accelerate.accel.min((-gravity.y*friction).clamp_1()) self.accelerate.accel.min((-gravity.y*friction).fix_1())
} }
pub fn get_walk_target_velocity(&self,control_dir:Planar64Vec3,normal:Planar64Vec3)->Planar64Vec3{ pub fn get_walk_target_velocity(&self,control_dir:Planar64Vec3,normal:Planar64Vec3)->Planar64Vec3{
if control_dir==crate::integer::vec3::ZERO{ if control_dir==crate::integer::vec3::ZERO{
@ -332,7 +332,7 @@ impl WalkSettings{
if cr==crate::integer::vec3::ZERO_2{ if cr==crate::integer::vec3::ZERO_2{
crate::integer::vec3::ZERO crate::integer::vec3::ZERO
}else{ }else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_1() (cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().fix_1()
} }
}else{ }else{
crate::integer::vec3::ZERO crate::integer::vec3::ZERO
@ -341,7 +341,7 @@ impl WalkSettings{
pub fn is_slope_walkable(&self,normal:Planar64Vec3,up:Planar64Vec3)->bool{ pub fn is_slope_walkable(&self,normal:Planar64Vec3,up:Planar64Vec3)->bool{
//normal is not guaranteed to be unit length //normal is not guaranteed to be unit length
let ny=normal.dot(up); let ny=normal.dot(up);
let h=normal.length().wrap_1(); let h=normal.length().fix_1();
//remember this is a normal vector //remember this is a normal vector
ny.is_positive()&&h*self.surf_dot<ny ny.is_positive()&&h*self.surf_dot<ny
} }
@ -355,7 +355,7 @@ pub struct LadderSettings{
pub dot:Planar64, pub dot:Planar64,
} }
impl LadderSettings{ impl LadderSettings{
pub const fn accel(&self,_target_diff:Planar64Vec3,_gravity:Planar64Vec3)->Planar64{ pub const fn accel(&self,target_diff:Planar64Vec3,gravity:Planar64Vec3)->Planar64{
//TODO: fallible ladder accel //TODO: fallible ladder accel
self.accelerate.accel self.accelerate.accel
} }
@ -368,13 +368,13 @@ impl LadderSettings{
let nnmm=nn*mm; let nnmm=nn*mm;
let d=normal.dot(control_dir); let d=normal.dot(control_dir);
let mut dd=d*d; let mut dd=d*d;
if (self.dot*self.dot*nnmm).clamp_4()<dd{ if (self.dot*self.dot*nnmm).fix_4()<dd{
if d.is_negative(){ if d.is_negative(){
control_dir=Planar64Vec3::new([Planar64::ZERO,mm.clamp_1(),Planar64::ZERO]); control_dir=Planar64Vec3::new([Planar64::ZERO,mm.fix_1(),Planar64::ZERO]);
}else{ }else{
control_dir=Planar64Vec3::new([Planar64::ZERO,-mm.clamp_1(),Planar64::ZERO]); control_dir=Planar64Vec3::new([Planar64::ZERO,-mm.fix_1(),Planar64::ZERO]);
} }
dd=(normal.y*normal.y).widen_4(); dd=(normal.y*normal.y).fix_4();
} }
//n=d if you are standing on top of a ladder and press E. //n=d if you are standing on top of a ladder and press E.
//two fixes: //two fixes:
@ -385,7 +385,7 @@ impl LadderSettings{
if cr==crate::integer::vec3::ZERO_2{ if cr==crate::integer::vec3::ZERO_2{
crate::integer::vec3::ZERO crate::integer::vec3::ZERO
}else{ }else{
(cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().clamp_1() (cr.cross(normal)*self.accelerate.topspeed/((nn*(nnmm-dd)).sqrt())).divide().fix_1()
} }
}else{ }else{
crate::integer::vec3::ZERO crate::integer::vec3::ZERO
@ -417,7 +417,7 @@ impl Hitbox{
} }
pub fn source()->Self{ pub fn source()->Self{
Self{ Self{
halfsize:((int3(33,73,33)>>1)*VALVE_SCALE).narrow_1().unwrap(), halfsize:((int3(33,73,33)>>1)*VALVE_SCALE).fix_1(),
mesh:HitboxMesh::Box, mesh:HitboxMesh::Box,
} }
} }
@ -529,20 +529,20 @@ impl StyleModifiers{
pub fn source_bhop()->Self{ pub fn source_bhop()->Self{
Self{ Self{
controls_mask:Controls::all(), controls_mask:Controls::all()-Controls::MoveUp-Controls::MoveDown,
controls_mask_state:Controls::all(), controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{ strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(), enable:ControlsActivation::full_2d(),
air_accel_limit:Some(Planar64::raw((150<<28)*100)), air_accel_limit:Some(Planar64::raw(150<<28)*100),
mv:Planar64::raw(30<<28), mv:(Planar64::raw(30)*VALVE_SCALE).fix_1(),
tick_rate:Ratio64::new(100,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(), tick_rate:Ratio64::new(100,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}), }),
jump:Some(JumpSettings{ jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_1().unwrap()), impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).fix_1()),
calculation:JumpCalculation::JumpThenBoost, calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true, limit_minimum:true,
}), }),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_1().unwrap(), gravity:(int3(0,-800,0)*VALVE_SCALE).fix_1(),
mass:int(1), mass:int(1),
rocket:None, rocket:None,
walk:Some(WalkSettings{ walk:Some(WalkSettings{
@ -565,25 +565,25 @@ impl StyleModifiers{
magnitude:int(12),//? magnitude:int(12),//?
}), }),
hitbox:Hitbox::source(), hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_1().unwrap(), camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).fix_1(),
} }
} }
pub fn source_surf()->Self{ pub fn source_surf()->Self{
Self{ Self{
controls_mask:Controls::all(), controls_mask:Controls::all()-Controls::MoveUp-Controls::MoveDown,
controls_mask_state:Controls::all(), controls_mask_state:Controls::all(),
strafe:Some(StrafeSettings{ strafe:Some(StrafeSettings{
enable:ControlsActivation::full_2d(), enable:ControlsActivation::full_2d(),
air_accel_limit:Some((int(150)*66*VALVE_SCALE).narrow_1().unwrap()), air_accel_limit:Some((int(150)*66*VALVE_SCALE).fix_1()),
mv:Planar64::raw(30<<28), mv:(int(30)*VALVE_SCALE).fix_1(),
tick_rate:Ratio64::new(66,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(), tick_rate:Ratio64::new(66,AbsoluteTime::ONE_SECOND.get() as u64).unwrap(),
}), }),
jump:Some(JumpSettings{ jump:Some(JumpSettings{
impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).narrow_1().unwrap()), impulse:JumpImpulse::Height((int(52)*VALVE_SCALE).fix_1()),
calculation:JumpCalculation::JumpThenBoost, calculation:JumpCalculation::JumpThenBoost,
limit_minimum:true, limit_minimum:true,
}), }),
gravity:(int3(0,-800,0)*VALVE_SCALE).narrow_1().unwrap(), gravity:(int3(0,-800,0)*VALVE_SCALE).fix_1(),
mass:int(1), mass:int(1),
rocket:None, rocket:None,
walk:Some(WalkSettings{ walk:Some(WalkSettings{
@ -606,7 +606,7 @@ impl StyleModifiers{
magnitude:int(12),//? magnitude:int(12),//?
}), }),
hitbox:Hitbox::source(), hitbox:Hitbox::source(),
camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).narrow_1().unwrap(), camera_offset:((int3(0,64,0)-(int3(0,73,0)>>1))*VALVE_SCALE).fix_1(),
} }
} }
} }
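The source_bhop and source_surf presets above spell the strafe mv constant three ways: Planar64::raw(30<<28), (int(30)*VALVE_SCALE).fix_1(), and (Planar64::raw(30)*VALVE_SCALE).fix_1(). Assuming Planar64 carries 32 fractional bits and VALVE_SCALE is 1/16 (both inferred from this diff, not stated in it), the first two denote the same 1.875 while the raw(30) spelling is many orders of magnitude smaller:

// Quick check of the mv constants in these hunks, under the assumptions named above.
fn main() {
    let frac = (1u64 << 32) as f64;
    println!("Planar64::raw(30<<28)      = {}", (30i64 << 28) as f64 / frac); // 1.875 = 30/16
    println!("int(30)*VALVE_SCALE (1/16) = {}", 30.0 / 16.0);                 // 1.875, if VALVE_SCALE == 1/16
    println!("Planar64::raw(30)/16       = {:e}", 30.0 / frac / 16.0);        // ~4.4e-10, a very different magnitude
}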

@ -1,35 +1,27 @@
#[derive(Clone,Debug)] use crate::integer::Time;
#[derive(Debug)]
pub struct TimedInstruction<I,T>{ pub struct TimedInstruction<I,T>{
pub time:T, pub time:Time<T>,
pub instruction:I, pub instruction:I,
} }
impl<I,T> TimedInstruction<I,T>{
#[inline]
pub fn set_time<T2>(self,new_time:T2)->TimedInstruction<I,T2>{
TimedInstruction{
time:new_time,
instruction:self.instruction,
}
}
}
/// Ensure all emitted instructions are processed before consuming external instructions /// Ensure all emitted instructions are processed before consuming external instructions
pub trait InstructionEmitter<I>{ pub trait InstructionEmitter<I>{
type Time; type TimeInner;
fn next_instruction(&self,time_limit:Self::Time)->Option<TimedInstruction<I,Self::Time>>; fn next_instruction(&self,time_limit:Time<Self::TimeInner>)->Option<TimedInstruction<I,Self::TimeInner>>;
} }
/// Apply an atomic state update /// Apply an atomic state update
pub trait InstructionConsumer<I>{ pub trait InstructionConsumer<I>{
type Time; type TimeInner;
fn process_instruction(&mut self,instruction:TimedInstruction<I,Self::Time>); fn process_instruction(&mut self,instruction:TimedInstruction<I,Self::TimeInner>);
} }
/// If the object produces its own instructions, allow exhaustively feeding them back in /// If the object produces its own instructions, allow exhaustively feeding them back in
pub trait InstructionFeedback<I,T>:InstructionEmitter<I,Time=T>+InstructionConsumer<I,Time=T> pub trait InstructionFeedback<I,T>:InstructionEmitter<I,TimeInner=T>+InstructionConsumer<I,TimeInner=T>
where where
T:Copy, Time<T>:Copy,
{ {
#[inline] fn process_exhaustive(&mut self,time_limit:Time<T>){
fn process_exhaustive(&mut self,time_limit:T){
while let Some(instruction)=self.next_instruction(time_limit){ while let Some(instruction)=self.next_instruction(time_limit){
self.process_instruction(instruction); self.process_instruction(instruction);
} }
@ -37,46 +29,83 @@ pub trait InstructionFeedback<I,T>:InstructionEmitter<I,Time=T>+InstructionConsu
} }
impl<I,T,X> InstructionFeedback<I,T> for X impl<I,T,X> InstructionFeedback<I,T> for X
where where
T:Copy, Time<T>:Copy,
X:InstructionEmitter<I,Time=T>+InstructionConsumer<I,Time=T>, X:InstructionEmitter<I,TimeInner=T>+InstructionConsumer<I,TimeInner=T>,
{} {}
pub struct InstructionCache<S,I,T>{
instruction_machine:S,
cached_instruction:Option<TimedInstruction<I,T>>,
time_limit:Time<T>,
}
impl<S,I,T> InstructionCache<S,I,T>
where
Time<T>:Copy+Ord,
Option<TimedInstruction<I,T>>:Clone,
S:InstructionEmitter<I,TimeInner=T>+InstructionConsumer<I,TimeInner=T>
{
pub fn new(
instruction_machine:S,
)->Self{
Self{
instruction_machine,
cached_instruction:None,
time_limit:Time::MIN,
}
}
pub fn next_instruction_cached(&mut self,time_limit:Time<T>)->Option<TimedInstruction<I,T>>{
if time_limit<self.time_limit{
return self.cached_instruction.clone();
}
let next_instruction=self.instruction_machine.next_instruction(time_limit);
self.cached_instruction=next_instruction.clone();
self.time_limit=time_limit;
next_instruction
}
pub fn process_instruction(&mut self,instruction:TimedInstruction<I,T>){
// invalidate cache
self.time_limit=Time::MIN;
self.instruction_machine.process_instruction(instruction);
}
}
//PROPER PRIVATE FIELDS!!! //PROPER PRIVATE FIELDS!!!
pub struct InstructionCollector<I,T>{ pub struct InstructionCollector<I,T>{
time:T, time:Time<T>,
instruction:Option<I>, instruction:Option<I>,
} }
impl<I,T> InstructionCollector<I,T>{ impl<I,T> InstructionCollector<I,T>
#[inline] where Time<T>:Copy+PartialOrd,
pub const fn new(time:T)->Self{ {
pub const fn new(time:Time<T>)->Self{
Self{ Self{
time, time,
instruction:None instruction:None
} }
} }
#[inline] #[inline]
pub fn take(self)->Option<TimedInstruction<I,T>>{ pub const fn time(&self)->Time<T>{
//STEAL INSTRUCTION AND DESTROY INSTRUCTIONCOLLECTOR
self.instruction.map(|instruction|TimedInstruction{
time:self.time,
instruction
})
}
}
impl<I,T:Copy> InstructionCollector<I,T>{
#[inline]
pub const fn time(&self)->T{
self.time self.time
} }
}
impl<I,T:PartialOrd> InstructionCollector<I,T>{
#[inline]
pub fn collect(&mut self,instruction:Option<TimedInstruction<I,T>>){ pub fn collect(&mut self,instruction:Option<TimedInstruction<I,T>>){
if let Some(ins)=instruction{ match instruction{
if ins.time<self.time{ Some(unwrap_instruction)=>{
self.time=ins.time; if unwrap_instruction.time<self.time {
self.instruction=Some(ins.instruction); self.time=unwrap_instruction.time;
self.instruction=Some(unwrap_instruction.instruction);
} }
},
None=>(),
}
}
pub fn instruction(self)->Option<TimedInstruction<I,T>>{
//STEAL INSTRUCTION AND DESTROY INSTRUCTIONCOLLECTOR
match self.instruction{
Some(instruction)=>Some(TimedInstruction{
time:self.time,
instruction
}),
None=>None,
} }
} }
} }
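The InstructionCache added in this file memoizes the emitter's next_instruction() result, reuses it for queries with a narrower time limit, and invalidates the cache whenever an instruction is processed. A self-contained sketch of that pattern with toy i64 times and a unit instruction (not the crate's generic Time<T> machinery):

#[derive(Clone, Debug, PartialEq)]
struct TimedInstruction<I> { time: i64, instruction: I }

trait Emitter<I> { fn next_instruction(&self, time_limit: i64) -> Option<TimedInstruction<I>>; }
trait Consumer<I> { fn process_instruction(&mut self, ins: TimedInstruction<I>); }

struct Cache<S, I> {
    machine: S,
    cached: Option<TimedInstruction<I>>,
    time_limit: i64,
}

impl<S, I> Cache<S, I>
where
    I: Clone,
    S: Emitter<I> + Consumer<I>,
{
    fn new(machine: S) -> Self {
        Self { machine, cached: None, time_limit: i64::MIN }
    }
    fn next_instruction_cached(&mut self, time_limit: i64) -> Option<TimedInstruction<I>> {
        // a narrower query reuses the previous, wider query's answer instead of re-running the machine
        if time_limit < self.time_limit {
            return self.cached.clone();
        }
        let next = self.machine.next_instruction(time_limit);
        self.cached = next.clone();
        self.time_limit = time_limit;
        next
    }
    fn process_instruction(&mut self, ins: TimedInstruction<I>) {
        self.time_limit = i64::MIN; // state changed, so the cached answer is no longer valid
        self.machine.process_instruction(ins);
    }
}

// toy machine: emits a single instruction at time `at` until it has been processed
struct Once { at: i64, done: bool }
impl Emitter<()> for Once {
    fn next_instruction(&self, time_limit: i64) -> Option<TimedInstruction<()>> {
        (!self.done && self.at <= time_limit).then(|| TimedInstruction { time: self.at, instruction: () })
    }
}
impl Consumer<()> for Once {
    fn process_instruction(&mut self, _ins: TimedInstruction<()>) { self.done = true; }
}

fn main() {
    let mut cache = Cache::new(Once { at: 3, done: false });
    assert_eq!(cache.next_instruction_cached(10).map(|i| i.time), Some(3));
    // the follow-up query has a smaller limit, so it is answered from the cache
    assert_eq!(cache.next_instruction_cached(5).map(|i| i.time), Some(3));
    cache.process_instruction(TimedInstruction { time: 3, instruction: () });
    assert_eq!(cache.next_instruction_cached(10), None);
}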

@ -1,20 +1,19 @@
pub use fixed_wide::fixed::*; pub use fixed_wide::fixed::{Fixed,Fix};
pub use ratio_ops::ratio::{Ratio,Divide}; pub use ratio_ops::ratio::{Ratio,Divide};
//integer units //integer units
/// specific example of a "default" time type /// specific example of a "default" time type
#[derive(Clone,Copy,Hash,Eq,PartialEq,Ord,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{} pub enum TimeInner{}
pub type AbsoluteTime=Time<TimeInner>; pub type AbsoluteTime=Time<TimeInner>;
#[derive(Clone,Copy,Hash,Eq,PartialEq,Ord,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub struct Time<T>(i64,core::marker::PhantomData<T>); pub struct Time<T>(i64,core::marker::PhantomData<T>);
impl<T> Time<T>{ impl<T> Time<T>{
pub const MIN:Self=Self::raw(i64::MIN); pub const MIN:Self=Self::raw(i64::MIN);
pub const MAX:Self=Self::raw(i64::MAX); pub const MAX:Self=Self::raw(i64::MAX);
pub const ZERO:Self=Self::raw(0); pub const ZERO:Self=Self::raw(0);
pub const EPSILON:Self=Self::raw(1);
pub const ONE_SECOND:Self=Self::raw(1_000_000_000); pub const ONE_SECOND:Self=Self::raw(1_000_000_000);
pub const ONE_MILLISECOND:Self=Self::raw(1_000_000); pub const ONE_MILLISECOND:Self=Self::raw(1_000_000);
pub const ONE_MICROSECOND:Self=Self::raw(1_000); pub const ONE_MICROSECOND:Self=Self::raw(1_000);
@ -60,24 +59,18 @@ impl<T> Time<T>{
impl<T> From<Planar64> for Time<T>{ impl<T> From<Planar64> for Time<T>{
#[inline] #[inline]
fn from(value:Planar64)->Self{ fn from(value:Planar64)->Self{
Self::raw((value*Planar64::raw(1_000_000_000)).clamp_1().to_raw()) Self::raw((value*Planar64::raw(1_000_000_000)).fix_1().to_raw())
}
}
impl<T> From<Time<T>> for Ratio<Planar64,Planar64>{
#[inline]
fn from(value:Time<T>)->Self{
value.to_ratio()
} }
} }
impl<T,Num,Den,N1,T1> From<Ratio<Num,Den>> for Time<T> impl<T,Num,Den,N1,T1> From<Ratio<Num,Den>> for Time<T>
where where
Num:core::ops::Mul<Planar64,Output=N1>, Num:core::ops::Mul<Planar64,Output=N1>,
N1:Divide<Den,Output=T1>, N1:Divide<Den,Output=T1>,
T1:Clamp<Planar64>, T1:Fix<Planar64>,
{ {
#[inline] #[inline]
fn from(value:Ratio<Num,Den>)->Self{ fn from(value:Ratio<Num,Den>)->Self{
Self::raw((value*Planar64::raw(1_000_000_000)).divide().clamp().to_raw()) Self::raw((value*Planar64::raw(1_000_000_000)).divide().fix().to_raw())
} }
} }
impl<T> std::fmt::Display for Time<T>{ impl<T> std::fmt::Display for Time<T>{
@ -401,10 +394,6 @@ impl Angle32{
pub const NEG_FRAC_PI_2:Self=Self(-1<<30); pub const NEG_FRAC_PI_2:Self=Self(-1<<30);
pub const PI:Self=Self(-1<<31); pub const PI:Self=Self(-1<<31);
#[inline] #[inline]
pub const fn raw(num:i32)->Self{
Self(num)
}
#[inline]
pub const fn wrap_from_i64(theta:i64)->Self{ pub const fn wrap_from_i64(theta:i64)->Self{
//take lower bits //take lower bits
//note: this was checked on compiler explorer and compiles to 1 instruction! //note: this was checked on compiler explorer and compiles to 1 instruction!
@ -519,8 +508,8 @@ fn angle_sin_cos(){
println!("cordic s={} c={}",(s/h).divide(),(c/h).divide()); println!("cordic s={} c={}",(s/h).divide(),(c/h).divide());
let (fs,fc)=f.sin_cos(); let (fs,fc)=f.sin_cos();
println!("float s={} c={}",fs,fc); println!("float s={} c={}",fs,fc);
assert!(close_enough((c/h).divide().wrap_1(),Planar64::raw((fc*((1u64<<32) as f64)) as i64))); assert!(close_enough((c/h).divide().fix_1(),Planar64::raw((fc*((1u64<<32) as f64)) as i64)));
assert!(close_enough((s/h).divide().wrap_1(),Planar64::raw((fs*((1u64<<32) as f64)) as i64))); assert!(close_enough((s/h).divide().fix_1(),Planar64::raw((fs*((1u64<<32) as f64)) as i64)));
} }
test_angle(1.0); test_angle(1.0);
test_angle(std::f64::consts::PI/4.0); test_angle(std::f64::consts::PI/4.0);
@ -562,10 +551,6 @@ pub mod vec3{
pub const MAX:Planar64Vec3=Planar64Vec3::new([Planar64::MAX;3]); pub const MAX:Planar64Vec3=Planar64Vec3::new([Planar64::MAX;3]);
pub const ZERO:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO;3]); pub const ZERO:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO;3]);
pub const ZERO_2:linear_ops::types::Vector3<Fixed::<2,64>>=linear_ops::types::Vector3::new([Fixed::<2,64>::ZERO;3]); pub const ZERO_2:linear_ops::types::Vector3<Fixed::<2,64>>=linear_ops::types::Vector3::new([Fixed::<2,64>::ZERO;3]);
pub const ZERO_3:linear_ops::types::Vector3<Fixed::<3,96>>=linear_ops::types::Vector3::new([Fixed::<3,96>::ZERO;3]);
pub const ZERO_4:linear_ops::types::Vector3<Fixed::<4,128>>=linear_ops::types::Vector3::new([Fixed::<4,128>::ZERO;3]);
pub const ZERO_5:linear_ops::types::Vector3<Fixed::<5,160>>=linear_ops::types::Vector3::new([Fixed::<5,160>::ZERO;3]);
pub const ZERO_6:linear_ops::types::Vector3<Fixed::<6,192>>=linear_ops::types::Vector3::new([Fixed::<6,192>::ZERO;3]);
pub const X:Planar64Vec3=Planar64Vec3::new([Planar64::ONE,Planar64::ZERO,Planar64::ZERO]); pub const X:Planar64Vec3=Planar64Vec3::new([Planar64::ONE,Planar64::ZERO,Planar64::ZERO]);
pub const Y:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ONE,Planar64::ZERO]); pub const Y:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ONE,Planar64::ZERO]);
pub const Z:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ZERO,Planar64::ONE]); pub const Z:Planar64Vec3=Planar64Vec3::new([Planar64::ZERO,Planar64::ZERO,Planar64::ONE]);
@ -633,8 +618,8 @@ pub mod mat3{
let (yc,ys)=y.cos_sin(); let (yc,ys)=y.cos_sin();
Planar64Mat3::from_cols([ Planar64Mat3::from_cols([
Planar64Vec3::new([xc,Planar64::ZERO,-xs]), Planar64Vec3::new([xc,Planar64::ZERO,-xs]),
Planar64Vec3::new([(xs*ys).wrap_1(),yc,(xc*ys).wrap_1()]), Planar64Vec3::new([(xs*ys).fix_1(),yc,(xc*ys).fix_1()]),
Planar64Vec3::new([(xs*yc).wrap_1(),-ys,(xc*yc).wrap_1()]), Planar64Vec3::new([(xs*yc).fix_1(),-ys,(xc*yc).fix_1()]),
]) ])
} }
#[inline] #[inline]
@ -662,21 +647,13 @@ pub struct Planar64Affine3{
pub translation:Planar64Vec3, pub translation:Planar64Vec3,
} }
impl Planar64Affine3{ impl Planar64Affine3{
pub const IDENTITY:Self=Self::new(mat3::identity(),vec3::ZERO);
#[inline] #[inline]
pub const fn new(matrix3:Planar64Mat3,translation:Planar64Vec3)->Self{ pub const fn new(matrix3:Planar64Mat3,translation:Planar64Vec3)->Self{
Self{matrix3,translation} Self{matrix3,translation}
} }
#[inline] #[inline]
pub const fn from_translation(translation:Planar64Vec3)->Self{
Self{
matrix3:mat3::identity(),
translation,
}
}
#[inline]
pub fn transform_point3(&self,point:Planar64Vec3)->vec3::Vector3<Fixed<2,64>>{ pub fn transform_point3(&self,point:Planar64Vec3)->vec3::Vector3<Fixed<2,64>>{
self.translation.widen_2()+self.matrix3*point self.translation.fix_2()+self.matrix3*point
} }
} }
impl Into<glam::Mat4> for Planar64Affine3{ impl Into<glam::Mat4> for Planar64Affine3{
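Time<T> above is a nanosecond count (ONE_SECOND = 1_000_000_000 raw) tagged with a zero-sized marker type, so the physics, session, and run clocks stay distinct at the type level. A standalone sketch of that phantom-type pattern (PhysicsClock/SessionClock are illustrative markers, not the crate's TimeInner enums):

use core::marker::PhantomData;

#[derive(Clone, Copy, Debug)]
struct Time<T>(i64, PhantomData<T>);

impl<T> Time<T> {
    const fn raw(ns: i64) -> Self { Self(ns, PhantomData) }
    const ONE_SECOND: Self = Self::raw(1_000_000_000);
}

#[derive(Clone, Copy, Debug)]
enum PhysicsClock {}
#[derive(Clone, Copy, Debug)]
enum SessionClock {}

fn step_physics(t: Time<PhysicsClock>) -> Time<PhysicsClock> { t }

fn main() {
    let physics_time = Time::<PhysicsClock>::ONE_SECOND;
    let session_time = Time::<SessionClock>::ONE_SECOND;
    assert_eq!(step_physics(physics_time).0, session_time.0); // raw i64 values compare fine
    // step_physics(session_time); // does not compile: the phantom markers differ
}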

@ -1,6 +1,5 @@
pub mod bvh; pub mod bvh;
pub mod map; pub mod map;
pub mod ray;
pub mod run; pub mod run;
pub mod aabb; pub mod aabb;
pub mod model; pub mod model;
@ -9,6 +8,7 @@ pub mod timer;
pub mod integer; pub mod integer;
pub mod physics; pub mod physics;
pub mod session; pub mod session;
pub mod updatable;
pub mod instruction; pub mod instruction;
pub mod gameplay_attributes; pub mod gameplay_attributes;
pub mod gameplay_modes; pub mod gameplay_modes;

@ -4,7 +4,7 @@ use crate::gameplay_attributes;
//this is a temporary struct to try to get the code running again //this is a temporary struct to try to get the code running again
//TODO: use snf::map::Region to update the data in physics and graphics instead of this //TODO: use snf::map::Region to update the data in physics and graphics instead of this
pub struct CompleteMap{ pub struct CompleteMap{
pub modes:gameplay_modes::NormalizedModes, pub modes:gameplay_modes::Modes,
pub attributes:Vec<gameplay_attributes::CollisionAttributes>, pub attributes:Vec<gameplay_attributes::CollisionAttributes>,
pub meshes:Vec<model::Mesh>, pub meshes:Vec<model::Mesh>,
pub models:Vec<model::Model>, pub models:Vec<model::Model>,

@ -1,5 +1,3 @@
use std::collections::HashMap;
use crate::integer::{Planar64Vec3,Planar64Affine3}; use crate::integer::{Planar64Vec3,Planar64Affine3};
use crate::gameplay_attributes; use crate::gameplay_attributes;
@ -125,87 +123,6 @@ pub struct Mesh{
pub physics_groups:Vec<IndexedPhysicsGroup>, pub physics_groups:Vec<IndexedPhysicsGroup>,
} }
#[derive(Default)]
pub struct MeshBuilder{
unique_pos:Vec<Planar64Vec3>,//Unit32Vec3
unique_normal:Vec<Planar64Vec3>,//Unit32Vec3
unique_tex:Vec<TextureCoordinate>,
unique_color:Vec<Color4>,
unique_vertices:Vec<IndexedVertex>,
pos_id_from:HashMap<Planar64Vec3,PositionId>,//Unit32Vec3
normal_id_from:HashMap<Planar64Vec3,NormalId>,//Unit32Vec3
tex_id_from:HashMap<[u32;2],TextureCoordinateId>,
color_id_from:HashMap<[u32;4],ColorId>,
vertex_id_from:HashMap<IndexedVertex,VertexId>,
}
impl MeshBuilder{
pub fn new()->Self{
Self::default()
}
pub fn build(
self,
polygon_groups:Vec<PolygonGroup>,
graphics_groups:Vec<IndexedGraphicsGroup>,
physics_groups:Vec<IndexedPhysicsGroup>,
)->Mesh{
let MeshBuilder{
unique_pos,
unique_normal,
unique_tex,
unique_color,
unique_vertices,
..
}=self;
Mesh{
unique_pos,
unique_normal,
unique_tex,
unique_color,
unique_vertices,
polygon_groups,
graphics_groups,
physics_groups,
}
}
pub fn acquire_pos_id(&mut self,pos:Planar64Vec3)->PositionId{
*self.pos_id_from.entry(pos).or_insert_with(||{
let pos_id=PositionId::new(self.unique_pos.len() as u32);
self.unique_pos.push(pos);
pos_id
})
}
pub fn acquire_normal_id(&mut self,normal:Planar64Vec3)->NormalId{
*self.normal_id_from.entry(normal).or_insert_with(||{
let normal_id=NormalId::new(self.unique_normal.len() as u32);
self.unique_normal.push(normal);
normal_id
})
}
pub fn acquire_tex_id(&mut self,tex:TextureCoordinate)->TextureCoordinateId{
let h=tex.to_array().map(f32::to_bits);
*self.tex_id_from.entry(h).or_insert_with(||{
let tex_id=TextureCoordinateId::new(self.unique_tex.len() as u32);
self.unique_tex.push(tex);
tex_id
})
}
pub fn acquire_color_id(&mut self,color:Color4)->ColorId{
let h=color.to_array().map(f32::to_bits);
*self.color_id_from.entry(h).or_insert_with(||{
let color_id=ColorId::new(self.unique_color.len() as u32);
self.unique_color.push(color);
color_id
})
}
pub fn acquire_vertex_id(&mut self,vertex:IndexedVertex)->VertexId{
*self.vertex_id_from.entry(vertex.clone()).or_insert_with(||{
let vertex_id=VertexId::new(self.unique_vertices.len() as u32);
self.unique_vertices.push(vertex);
vertex_id
})
}
}
#[derive(Debug,Clone,Copy,Hash,id::Id,Eq,PartialEq)] #[derive(Debug,Clone,Copy,Hash,id::Id,Eq,PartialEq)]
pub struct ModelId(u32); pub struct ModelId(u32);
pub struct Model{ pub struct Model{
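MeshBuilder above interns positions, normals, texture coordinates, colors, and whole vertices, handing out a stable id the first time each unique value is seen via the HashMap entry API (float attributes are keyed by their to_bits patterns, since f32 is neither Eq nor Hash). A standalone sketch of that interning pattern for a single attribute table:

use std::collections::HashMap;

#[derive(Default)]
struct Interner {
    unique_pos: Vec<[i64; 3]>,           // stand-in for the Planar64Vec3 table
    pos_id_from: HashMap<[i64; 3], u32>, // reverse lookup: value -> id
}

impl Interner {
    fn acquire_pos_id(&mut self, pos: [i64; 3]) -> u32 {
        let unique_pos = &mut self.unique_pos;
        *self.pos_id_from.entry(pos).or_insert_with(|| {
            // first time this value is seen: append it and record its index as the id
            let id = unique_pos.len() as u32;
            unique_pos.push(pos);
            id
        })
    }
}

fn main() {
    let mut interner = Interner::default();
    let a = interner.acquire_pos_id([0, 0, 0]);
    let b = interner.acquire_pos_id([1, 0, 0]);
    let c = interner.acquire_pos_id([0, 0, 0]); // repeated position reuses the first id
    assert_eq!((a, b, c), (0, 1, 0));
    assert_eq!(interner.unique_pos.len(), 2);
}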

@ -1,21 +1,22 @@
use crate::mouse::MouseState; use crate::mouse::MouseState;
use crate::gameplay_modes::{ModeId,StageId};
#[derive(Clone,Copy,Hash,Eq,PartialEq,Ord,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{} pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>; pub type Time=crate::integer::Time<TimeInner>;
#[derive(Clone,Debug)] #[derive(Clone,Debug)]
pub enum Instruction{ pub enum Instruction{
Mouse(MouseInstruction), Mouse(MouseInstruction),
SetControl(SetControlInstruction), Other(OtherInstruction),
Mode(ModeInstruction),
Misc(MiscInstruction),
/// Idle: there were no input events, but the simulation is safe to advance to this timestep
Idle,
} }
impl Instruction{ impl Instruction{
pub const IDLE:Self=Self::Idle; pub const IDLE:Self=Self::Other(OtherInstruction::Other(OtherOtherInstruction::Idle));
}
#[derive(Clone,Debug)]
pub enum OtherInstruction{
SetControl(SetControlInstruction),
Mode(ModeInstruction),
Other(OtherOtherInstruction),
} }
#[derive(Clone,Debug)] #[derive(Clone,Debug)]
pub enum MouseInstruction{ pub enum MouseInstruction{
@ -43,14 +44,15 @@ pub enum ModeInstruction{
/// This forgets all inputs and settings which need to be reapplied. /// This forgets all inputs and settings which need to be reapplied.
Reset, Reset,
/// Restart: Teleport to the start zone. /// Restart: Teleport to the start zone.
/// This runs when you press R or teleport to a bonus Restart,
Restart(ModeId),
/// Spawn: Teleport to a specific mode's spawn /// Spawn: Teleport to a specific mode's spawn
/// This runs when the map loads to put you at the map lobby /// Sets current mode & spawn
Spawn(ModeId,StageId), Spawn(crate::gameplay_modes::ModeId,crate::gameplay_modes::StageId),
} }
#[derive(Clone,Debug)] #[derive(Clone,Debug)]
pub enum MiscInstruction{ pub enum OtherOtherInstruction{
/// Idle: there were no input events, but the simulation is safe to advance to this timestep
Idle,
PracticeFly, PracticeFly,
SetSensitivity(crate::integer::Ratio64Vec2), SetSensitivity(crate::integer::Ratio64Vec2),
} }

@ -1,20 +0,0 @@
use ratio_ops::ratio::Ratio;
use crate::integer::{self,Planar64,Planar64Vec3};
pub struct Ray{
pub origin:Planar64Vec3,
pub direction:Planar64Vec3,
}
impl Ray{
pub fn extrapolate<Num,Den,N1,T1>(&self,t:Ratio<Num,Den>)->Planar64Vec3
where
Num:Copy,
Den:Copy,
Num:core::ops::Mul<Planar64,Output=N1>,
Planar64:core::ops::Mul<Den,Output=N1>,
N1:integer::Divide<Den,Output=T1>,
T1:integer::Clamp<Planar64>,
{
self.origin+self.direction.map(|elem|(t*elem).divide().clamp())
}
}
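Ray::extrapolate here evaluates origin + direction·t, with t carried as a deferred-division Ratio so the divide happens only after multiplying by each component. The same evaluation in plain f64 (a sketch, without the Ratio/Clamp machinery):

struct Ray { origin: [f64; 3], direction: [f64; 3] }

impl Ray {
    fn extrapolate(&self, t: f64) -> [f64; 3] {
        // point on the ray at parameter t
        [
            self.origin[0] + self.direction[0] * t,
            self.origin[1] + self.direction[1] * t,
            self.origin[2] + self.direction[2] * t,
        ]
    }
}

fn main() {
    let ray = Ray { origin: [0.0, 1.0, 0.0], direction: [2.0, 0.0, 0.0] };
    assert_eq!(ray.extrapolate(0.5), [1.0, 1.0, 0.0]);
}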

@ -2,7 +2,7 @@ use crate::timer::{TimerFixed,Realtime,Paused,Unpaused};
use crate::physics::{TimeInner as PhysicsTimeInner,Time as PhysicsTime}; use crate::physics::{TimeInner as PhysicsTimeInner,Time as PhysicsTime};
#[derive(Clone,Copy,Hash,Eq,PartialEq,Ord,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{} pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>; pub type Time=crate::integer::Time<TimeInner>;
@ -76,7 +76,7 @@ impl Run{
match &self.state{ match &self.state{
RunState::Created=>Time::ZERO, RunState::Created=>Time::ZERO,
RunState::Started{timer}=>timer.time(time), RunState::Started{timer}=>timer.time(time),
RunState::Finished{timer}=>timer.time(), RunState::Finished{timer}=>timer.time(time),
} }
} }
pub fn start(&mut self,time:PhysicsTime)->Result<(),Error>{ pub fn start(&mut self,time:PhysicsTime)->Result<(),Error>{
@ -110,10 +110,4 @@ impl Run{
self.flagged=Some(flag_reason); self.flagged=Some(flag_reason);
} }
} }
pub fn get_finish_time(&self)->Option<Time>{
match &self.state{
RunState::Finished{timer}=>Some(timer.time()),
_=>None,
}
}
} }

@ -1,3 +1,3 @@
#[derive(Clone,Copy,Hash,Eq,PartialEq,Ord,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum TimeInner{} pub enum TimeInner{}
pub type Time=crate::integer::Time<TimeInner>; pub type Time=crate::integer::Time<TimeInner>;

@ -23,7 +23,7 @@ impl PauseState for Unpaused{
} }
#[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)] #[derive(Clone,Copy,Hash,Eq,PartialEq,PartialOrd,Debug)]
pub enum Inner{} enum Inner{}
type InnerTime=Time<Inner>; type InnerTime=Time<Inner>;
#[derive(Clone,Copy,Debug)] #[derive(Clone,Copy,Debug)]
@ -157,7 +157,7 @@ impl<T:TimerState> TimerFixed<T,Paused>
where Time<T::In>:Copy, where Time<T::In>:Copy,
{ {
pub fn into_unpaused(self,time:Time<T::In>)->TimerFixed<T,Unpaused>{ pub fn into_unpaused(self,time:Time<T::In>)->TimerFixed<T,Unpaused>{
let new_time=self.time(); let new_time=self.time(time);
let mut timer=TimerFixed{ let mut timer=TimerFixed{
state:self.state, state:self.state,
_paused:Unpaused, _paused:Unpaused,
@ -165,9 +165,6 @@ impl<T:TimerState> TimerFixed<T,Paused>
timer.set_time(time,new_time); timer.set_time(time,new_time);
timer timer
} }
pub fn time(&self)->Time<T::Out>{
self.state.get_offset().coerce()
}
} }
impl<T:TimerState> TimerFixed<T,Unpaused> impl<T:TimerState> TimerFixed<T,Unpaused>
where Time<T::In>:Copy, where Time<T::In>:Copy,
@ -181,9 +178,6 @@ impl<T:TimerState> TimerFixed<T,Unpaused>
timer.set_time(time,new_time); timer.set_time(time,new_time);
timer timer
} }
pub fn time(&self,time:Time<T::In>)->Time<T::Out>{
self.state.get_time(time)
}
} }
//the new constructor and time queries are generic across both //the new constructor and time queries are generic across both
@ -205,6 +199,12 @@ impl<T:TimerState,P:PauseState> TimerFixed<T,P>{
pub fn into_state(self)->T{ pub fn into_state(self)->T{
self.state self.state
} }
pub fn time(&self,time:Time<T::In>)->Time<T::Out>{
match P::IS_PAUSED{
true=>self.state.get_offset().coerce(),
false=>self.state.get_time(time),
}
}
pub fn set_time(&mut self,time:Time<T::In>,new_time:Time<T::Out>){ pub fn set_time(&mut self,time:Time<T::In>,new_time:Time<T::Out>){
match P::IS_PAUSED{ match P::IS_PAUSED{
true=>self.state.set_offset(new_time.coerce()), true=>self.state.set_offset(new_time.coerce()),
@ -256,7 +256,7 @@ impl<T:TimerState> Timer<T>
} }
pub fn time(&self,time:Time<T::In>)->Time<T::Out>{ pub fn time(&self,time:Time<T::In>)->Time<T::Out>{
match self{ match self{
Self::Paused(timer)=>timer.time(), Self::Paused(timer)=>timer.time(time),
Self::Unpaused(timer)=>timer.time(time), Self::Unpaused(timer)=>timer.time(time),
} }
} }
@ -329,7 +329,7 @@ mod test{
//create a paused timer that reads 0s //create a paused timer that reads 0s
let timer=TimerFixed::<Scaled<Parent,Calculated>,Paused>::from_state(Scaled::new(0.5f32.try_into().unwrap(),sec!(0))); let timer=TimerFixed::<Scaled<Parent,Calculated>,Paused>::from_state(Scaled::new(0.5f32.try_into().unwrap(),sec!(0)));
//the paused timer at 1 second should read 0s //the paused timer at 1 second should read 0s
assert_eq!(timer.time(),sec!(0)); assert_eq!(timer.time(sec!(1)),sec!(0));
//unpause it after one second //unpause it after one second
let timer=timer.into_unpaused(sec!(1)); let timer=timer.into_unpaused(sec!(1));
@ -339,7 +339,7 @@ mod test{
//pause the timer after 11 seconds //pause the timer after 11 seconds
let timer=timer.into_paused(sec!(11)); let timer=timer.into_paused(sec!(11));
//the paused timer at 20 seconds should read 5s //the paused timer at 20 seconds should read 5s
assert_eq!(timer.time(),sec!(5)); assert_eq!(timer.time(sec!(20)),sec!(5));
} }
#[test] #[test]
fn test_timer()->Result<(),Error>{ fn test_timer()->Result<(),Error>{
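The timer change above collapses the separate paused and unpaused time() methods into one method that branches on the pause typestate's IS_PAUSED const. A standalone sketch of that shape with toy i64 times (the crate's timer states also offset and scale; only the pause branch is shown here):

trait PauseState { const IS_PAUSED: bool; }
struct Paused;
struct Unpaused;
impl PauseState for Paused { const IS_PAUSED: bool = true; }
impl PauseState for Unpaused { const IS_PAUSED: bool = false; }

struct TimerFixed<P: PauseState> {
    offset: i64, // reading shown while paused / subtracted from the clock while running
    _paused: core::marker::PhantomData<P>,
}

impl<P: PauseState> TimerFixed<P> {
    fn time(&self, now: i64) -> i64 {
        match P::IS_PAUSED {
            true => self.offset,        // frozen reading, independent of the wall clock
            false => now - self.offset, // running reading
        }
    }
}

fn main() {
    let paused = TimerFixed::<Paused> { offset: 5, _paused: core::marker::PhantomData };
    let running = TimerFixed::<Unpaused> { offset: 5, _paused: core::marker::PhantomData };
    assert_eq!(paused.time(20), 5);
    assert_eq!(running.time(20), 15);
}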

@ -0,0 +1,56 @@
pub trait Updatable<Updater>{
fn update(&mut self,update:Updater);
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
struct InnerId(u32);
#[derive(Clone)]
struct Inner{
id:InnerId,
enabled:bool,
}
#[derive(Clone,Copy,Hash,Eq,PartialEq)]
struct OuterId(u32);
struct Outer{
id:OuterId,
inners:std::collections::HashMap<InnerId,Inner>,
}
enum Update<I,U>{
Insert(I),
Update(U),
Remove
}
struct InnerUpdate{
//#[updatable(Update)]
enabled:Option<bool>,
}
struct OuterUpdate{
//#[updatable(Insert,Update,Remove)]
inners:std::collections::HashMap<InnerId,Update<Inner,InnerUpdate>>,
//#[updatable(Update)]
//inners:std::collections::HashMap<InnerId,InnerUpdate>,
}
impl Updatable<InnerUpdate> for Inner{
fn update(&mut self,update:InnerUpdate){
if let Some(enabled)=update.enabled{
self.enabled=enabled;
}
}
}
impl Updatable<OuterUpdate> for Outer{
fn update(&mut self,update:OuterUpdate){
for (id,up) in update.inners{
match up{
Update::Insert(new_inner)=>self.inners.insert(id,new_inner),
Update::Update(inner_update)=>self.inners.get_mut(&id).map(|inner|{
let old=inner.clone();
inner.update(inner_update);
old
}),
Update::Remove=>self.inners.remove(&id),
};
}
}
}
//*/
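The Update<Insert,Update,Remove> map added in this file expresses a keyed diff that can be replayed onto existing state. A condensed, standalone version of the same dispatch over a plain HashMap<u32,String>, where appending a suffix stands in for the field-wise partial update:

use std::collections::HashMap;

enum Update<I, U> { Insert(I), Update(U), Remove }

// apply a batch of per-key deltas, mirroring Outer::update above
fn apply(target: &mut HashMap<u32, String>, deltas: HashMap<u32, Update<String, String>>) {
    for (id, delta) in deltas {
        match delta {
            Update::Insert(new_value) => { target.insert(id, new_value); }
            Update::Update(suffix) => {
                if let Some(value) = target.get_mut(&id) {
                    value.push_str(&suffix); // the toy "partial update" is an append
                }
            }
            Update::Remove => { target.remove(&id); }
        }
    }
}

fn main() {
    let mut state = HashMap::from([(1, "a".to_string()), (2, "b".to_string())]);
    let deltas = HashMap::from([
        (1, Update::Update("!".to_string())),
        (2, Update::Remove),
        (3, Update::Insert("c".to_string())),
    ]);
    apply(&mut state, deltas);
    assert_eq!(state, HashMap::from([(1, "a!".to_string()), (3, "c".to_string())]));
}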

@ -1,7 +1,7 @@
[package] [package]
name = "strafesnet_deferred_loader" name = "strafesnet_deferred_loader"
version = "0.5.0" version = "0.4.1"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Acquire IDs for objects before loading them in bulk." description = "Acquire IDs for objects before loading them in bulk."
@ -9,5 +9,13 @@ authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["legacy"]
legacy = ["dep:url","dep:vbsp"]
#roblox = ["dep:lazy-regex"]
#source = ["dep:vbsp"]
[dependencies] [dependencies]
strafesnet_common = { version = "0.6.0", path = "../common", registry = "strafesnet" } strafesnet_common = { path = "../common", registry = "strafesnet" }
url = { version = "2.5.2", optional = true }
vbsp = { version = "0.6.0", optional = true }

@ -1,116 +0,0 @@
use std::collections::HashMap;
use crate::loader::Loader;
use crate::mesh::Meshes;
use crate::texture::{RenderConfigs,Texture};
use strafesnet_common::model::{Mesh,MeshId,RenderConfig,RenderConfigId,TextureId};
#[derive(Clone,Copy,Debug)]
pub enum LoadFailureMode{
DefaultToNone,
Fatal,
}
pub struct RenderConfigDeferredLoader<H>{
texture_count:u32,
render_configs:Vec<RenderConfig>,
render_config_id_from_asset_id:HashMap<Option<H>,RenderConfigId>,
}
impl<H> RenderConfigDeferredLoader<H>{
pub fn new()->Self{
Self{
texture_count:0,
render_configs:Vec::new(),
render_config_id_from_asset_id:HashMap::new(),
}
}
}
impl<H:core::hash::Hash+Eq> RenderConfigDeferredLoader<H>{
pub fn acquire_render_config_id(&mut self,index:Option<H>)->RenderConfigId{
let some_texture=index.is_some();
*self.render_config_id_from_asset_id.entry(index).or_insert_with(||{
//create the render config.
let render_config=if some_texture{
let render_config=RenderConfig::texture(TextureId::new(self.texture_count));
self.texture_count+=1;
render_config
}else{
RenderConfig::default()
};
let render_id=RenderConfigId::new(self.render_configs.len() as u32);
self.render_configs.push(render_config);
render_id
})
}
pub fn into_indices(self)->impl Iterator<Item=H>{
self.render_config_id_from_asset_id.into_keys().flatten()
}
pub fn into_render_configs<L:Loader<Resource=Texture,Index=H>>(mut self,loader:&mut L,failure_mode:LoadFailureMode)->Result<RenderConfigs,L::Error>{
let mut sorted_textures=vec![None;self.texture_count as usize];
for (index_option,render_config_id) in self.render_config_id_from_asset_id{
let render_config=&mut self.render_configs[render_config_id.get() as usize];
if let (Some(index),Some(texture_id))=(index_option,render_config.texture){
let resource_result=loader.load(index);
let texture=match failure_mode{
// if texture fails to load, use no texture
LoadFailureMode::DefaultToNone=>match resource_result{
Ok(texture)=>Some(texture),
Err(e)=>{
render_config.texture=None;
println!("Error loading texture: {e}");
None
},
},
// loading failure is fatal
LoadFailureMode::Fatal=>Some(resource_result?)
};
sorted_textures[texture_id.get() as usize]=texture;
}
}
Ok(RenderConfigs::new(
sorted_textures,
self.render_configs,
))
}
}
pub struct MeshDeferredLoader<H>{
mesh_id_from_asset_id:HashMap<H,MeshId>,
}
impl<H> MeshDeferredLoader<H>{
pub fn new()->Self{
Self{
mesh_id_from_asset_id:HashMap::new(),
}
}
}
impl<H:core::hash::Hash+Eq> MeshDeferredLoader<H>{
pub fn acquire_mesh_id(&mut self,index:H)->MeshId{
let mesh_id=MeshId::new(self.mesh_id_from_asset_id.len() as u32);
*self.mesh_id_from_asset_id.entry(index).or_insert(mesh_id)
}
pub fn into_indices(self)->impl Iterator<Item=H>{
self.mesh_id_from_asset_id.into_keys()
}
pub fn into_meshes<L:Loader<Resource=Mesh,Index=H>>(self,loader:&mut L,failure_mode:LoadFailureMode)->Result<Meshes,L::Error>{
let mut mesh_list=vec![None;self.mesh_id_from_asset_id.len()];
for (index,mesh_id) in self.mesh_id_from_asset_id{
let resource_result=loader.load(index);
let mesh=match failure_mode{
// if mesh fails to load, use no mesh
LoadFailureMode::DefaultToNone=>match resource_result{
Ok(mesh)=>Some(mesh),
Err(e)=>{
println!("Error loading mesh: {e}");
None
},
},
// loading failure is fatal
LoadFailureMode::Fatal=>Some(resource_result?)
};
mesh_list[mesh_id.get() as usize]=mesh;
}
Ok(Meshes::new(mesh_list))
}
}

@ -1,5 +1,34 @@
#[cfg(feature="legacy")]
mod roblox_legacy;
#[cfg(feature="legacy")]
mod source_legacy;
#[cfg(feature="roblox")]
mod roblox;
#[cfg(feature="source")]
mod source;
#[cfg(any(feature="roblox",feature="legacy"))]
pub mod rbxassetid;
pub mod mesh;
pub mod loader;
pub mod texture; pub mod texture;
pub mod deferred_loader; #[cfg(any(feature="source",feature="legacy"))]
pub mod valve_mesh;
#[cfg(any(feature="roblox",feature="legacy"))]
pub mod roblox_mesh;
#[cfg(feature="legacy")]
pub fn roblox_legacy()->roblox_legacy::Loader{
roblox_legacy::Loader::new()
}
#[cfg(feature="legacy")]
pub fn source_legacy()->source_legacy::Loader{
source_legacy::Loader::new()
}
#[cfg(feature="roblox")]
pub fn roblox()->roblox::Loader{
roblox::Loader::new()
}
#[cfg(feature="source")]
pub fn source()->source::Loader{
source::Loader::new()
}

@ -1,8 +0,0 @@
use std::error::Error;
pub trait Loader{
type Error:Error;
type Index;
type Resource;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>;
}

@ -1,17 +0,0 @@
use strafesnet_common::model::{Mesh,MeshId};
pub struct Meshes{
meshes:Vec<Option<Mesh>>,
}
impl Meshes{
pub(crate) const fn new(meshes:Vec<Option<Mesh>>)->Self{
Self{
meshes,
}
}
pub fn consume(self)->impl Iterator<Item=(MeshId,Mesh)>{
self.meshes.into_iter().enumerate().filter_map(|(mesh_id,maybe_mesh)|
maybe_mesh.map(|mesh|(MeshId::new(mesh_id as u32),mesh))
)
}
}

@ -0,0 +1,48 @@
#[derive(Hash,Eq,PartialEq)]
pub struct RobloxAssetId(pub u64);
#[derive(Debug)]
#[allow(dead_code)]
pub struct StringWithError{
string:String,
error:RobloxAssetIdParseErr,
}
impl std::fmt::Display for StringWithError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for StringWithError{}
impl StringWithError{
const fn new(
string:String,
error:RobloxAssetIdParseErr,
)->Self{
Self{string,error}
}
}
#[derive(Debug)]
pub enum RobloxAssetIdParseErr{
Url(url::ParseError),
UnknownScheme,
ParseInt(std::num::ParseIntError),
MissingAssetId,
}
impl std::str::FromStr for RobloxAssetId{
type Err=StringWithError;
fn from_str(s:&str)->Result<Self,Self::Err>{
let url=url::Url::parse(s).map_err(|e|StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::Url(e)))?;
let parsed_asset_id=match url.scheme(){
"rbxassetid"=>url.domain().ok_or_else(||StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::MissingAssetId))?.parse(),
"http"|"https"=>{
let (_,asset_id)=url.query_pairs()
.find(|(id,_)|match id.as_ref(){
"ID"|"id"|"Id"|"iD"=>true,
_=>false,
}).ok_or_else(||StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::MissingAssetId))?;
asset_id.parse()
},
_=>Err(StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::UnknownScheme))?,
};
Ok(Self(parsed_asset_id.map_err(|e|StringWithError::new(s.to_owned(),RobloxAssetIdParseErr::ParseInt(e)))?))
}
}
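The FromStr impl above accepts both rbxassetid:// URLs and http(s) URLs carrying an ?id= query pair. A usage sketch, assuming this crate (with the url-backed parser shown here) is available as a dependency and behaves as the match arms intend; the asset id below is an arbitrary example:

use strafesnet_deferred_loader::rbxassetid::RobloxAssetId;

fn main() {
    // both URL shapes handled above parse down to the same numeric id
    let a: RobloxAssetId = "rbxassetid://255299419".parse().unwrap();
    let b: RobloxAssetId = "https://www.roblox.com/asset/?id=255299419".parse().unwrap();
    assert_eq!(a.0, 255299419);
    assert_eq!(b.0, 255299419);
}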

@ -0,0 +1,112 @@
use std::io::Read;
use std::collections::HashMap;
use crate::roblox_mesh;
use crate::texture::{RenderConfigs,Texture};
use strafesnet_common::model::{MeshId,RenderConfig,RenderConfigId,TextureId};
use crate::rbxassetid::RobloxAssetId;
#[derive(Default)]
pub struct RenderConfigLoader{
texture_count:u32,
render_configs:Vec<RenderConfig>,
render_config_id_from_asset_id:HashMap<Option<RobloxAssetId>,RenderConfigId>,
}
impl RenderConfigLoader{
pub fn acquire_render_config_id(&mut self,name:Option<&str>)->RenderConfigId{
let render_id=RenderConfigId::new(self.render_config_id_from_asset_id.len() as u32);
let index=name.and_then(|name|{
match name.parse::<RobloxAssetId>(){
Ok(asset_id)=>Some(asset_id),
Err(e)=>{
println!("Failed to parse AssetId: {e}");
None
},
}
});
*self.render_config_id_from_asset_id.entry(index).or_insert_with(||{
//create the render config.
let render_config=if name.is_some(){
let render_config=RenderConfig::texture(TextureId::new(self.texture_count));
self.texture_count+=1;
render_config
}else{
RenderConfig::default()
};
self.render_configs.push(render_config);
render_id
})
}
}
#[derive(Default)]
pub struct MeshLoader{
mesh_id_from_asset_id:HashMap<Option<RobloxAssetId>,MeshId>,
}
impl MeshLoader{
pub fn acquire_mesh_id(&mut self,name:&str)->MeshId{
let mesh_id=MeshId::new(self.mesh_id_from_asset_id.len() as u32);
let index=match name.parse::<RobloxAssetId>(){
Ok(asset_id)=>Some(asset_id),
Err(e)=>{
println!("Failed to parse AssetId: {e}");
None
},
};
*self.mesh_id_from_asset_id.entry(index).or_insert(mesh_id)
}
pub fn load_meshes(&mut self)->Result<roblox_mesh::Meshes,std::io::Error>{
let mut mesh_data=vec![None;self.mesh_id_from_asset_id.len()];
for (asset_id_option,mesh_id) in &self.mesh_id_from_asset_id{
if let Some(asset_id)=asset_id_option{
if let Ok(mut file)=std::fs::File::open(format!("meshes/{}",asset_id.0)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
mesh_data[mesh_id.get() as usize]=Some(roblox_mesh::RobloxMeshData::new(data));
}else{
println!("[roblox_legacy] no mesh name={}",asset_id.0);
}
}
}
Ok(roblox_mesh::Meshes::new(mesh_data))
}
}
pub struct Loader{
render_config_loader:RenderConfigLoader,
mesh_loader:MeshLoader,
}
impl Loader{
pub fn new()->Self{
Self{
render_config_loader:RenderConfigLoader::default(),
mesh_loader:MeshLoader::default(),
}
}
pub fn get_inner_mut(&mut self)->(&mut RenderConfigLoader,&mut MeshLoader){
(&mut self.render_config_loader,&mut self.mesh_loader)
}
pub fn into_render_configs(mut self)->Result<RenderConfigs,std::io::Error>{
let mut sorted_textures=vec![None;self.render_config_loader.texture_count as usize];
for (asset_id_option,render_config_id) in self.render_config_loader.render_config_id_from_asset_id{
let render_config=self.render_config_loader.render_configs.get_mut(render_config_id.get() as usize).unwrap();
if let (Some(asset_id),Some(texture_id))=(asset_id_option,render_config.texture){
if let Ok(mut file)=std::fs::File::open(format!("textures/{}.dds",asset_id.0)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
sorted_textures[texture_id.get() as usize]=Some(Texture::ImageDDS(data));
}else{
//texture failed to load
render_config.texture=None;
}
}
}
Ok(RenderConfigs::new(
sorted_textures,
self.render_config_loader.render_configs,
))
}
}

@ -0,0 +1,30 @@
use strafesnet_common::model::MeshId;
#[derive(Clone)]
pub struct RobloxMeshData(Vec<u8>);
impl RobloxMeshData{
pub(crate) fn new(data:Vec<u8>)->Self{
Self(data)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
pub struct Meshes{
meshes:Vec<Option<RobloxMeshData>>,
}
impl Meshes{
pub(crate) const fn new(meshes:Vec<Option<RobloxMeshData>>)->Self{
Self{
meshes,
}
}
pub fn get_texture(&self,texture_id:MeshId)->Option<&RobloxMeshData>{
self.meshes.get(texture_id.get() as usize)?.as_ref()
}
pub fn into_iter(self)->impl Iterator<Item=(MeshId,RobloxMeshData)>{
self.meshes.into_iter().enumerate().filter_map(|(mesh_id,maybe_mesh)|
maybe_mesh.map(|mesh|(MeshId::new(mesh_id as u32),mesh))
)
}
}
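Meshes::into_iter above turns the Vec<Option<RobloxMeshData>> produced by loading into (MeshId, data) pairs, silently dropping slots that failed to load. The same enumerate-plus-filter_map shape on plain types:

// Standalone sketch of the Vec<Option<T>> -> (id, T) iteration used above, with u32 ids
// in place of the crate's MeshId newtype.
fn into_loaded<T>(meshes: Vec<Option<T>>) -> impl Iterator<Item = (u32, T)> {
    meshes.into_iter().enumerate().filter_map(|(i, maybe)| maybe.map(|mesh| (i as u32, mesh)))
}

fn main() {
    // slot 1 failed to load, so it is skipped
    let loaded: Vec<(u32, &str)> = into_loaded(vec![Some("a"), None, Some("c")]).collect();
    assert_eq!(loaded, vec![(0, "a"), (2, "c")]);
}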

@ -0,0 +1,102 @@
use std::io::Read;
use std::collections::HashMap;
use crate::valve_mesh;
use crate::texture::{Texture,RenderConfigs};
use strafesnet_common::model::{MeshId,TextureId,RenderConfig,RenderConfigId};
pub struct RenderConfigLoader{
texture_count:u32,
render_configs:Vec<RenderConfig>,
texture_paths:HashMap<Option<Box<str>>,RenderConfigId>,
}
impl RenderConfigLoader{
pub fn acquire_render_config_id(&mut self,name:Option<&str>)->RenderConfigId{
let render_id=RenderConfigId::new(self.texture_paths.len() as u32);
*self.texture_paths.entry(name.map(Into::into)).or_insert_with(||{
//create the render config.
let render_config=if name.is_some(){
let render_config=RenderConfig::texture(TextureId::new(self.texture_count));
self.texture_count+=1;
render_config
}else{
RenderConfig::default()
};
self.render_configs.push(render_config);
render_id
})
}
}
pub struct MeshLoader{
mesh_paths:HashMap<Box<str>,MeshId>,
}
impl MeshLoader{
pub fn acquire_mesh_id(&mut self,name:&str)->MeshId{
let mesh_id=MeshId::new(self.mesh_paths.len() as u32);
*self.mesh_paths.entry(name.into()).or_insert(mesh_id)
}
//load_meshes should look like load_textures
pub fn load_meshes(&mut self,bsp:&vbsp::Bsp)->valve_mesh::Meshes{
let mut mesh_data=vec![None;self.mesh_paths.len()];
for (mesh_path,mesh_id) in &self.mesh_paths{
let mesh_path_lower=mesh_path.to_lowercase();
//.mdl, .vvd, .dx90.vtx
let path=std::path::PathBuf::from(mesh_path_lower.as_str());
let mut vvd_path=path.clone();
let mut vtx_path=path.clone();
vvd_path.set_extension("vvd");
vtx_path.set_extension("dx90.vtx");
match (bsp.pack.get(mesh_path_lower.as_str()),bsp.pack.get(vvd_path.as_os_str().to_str().unwrap()),bsp.pack.get(vtx_path.as_os_str().to_str().unwrap())){
(Ok(Some(mdl_file)),Ok(Some(vvd_file)),Ok(Some(vtx_file)))=>{
mesh_data[mesh_id.get() as usize]=Some(valve_mesh::ModelData{
mdl:valve_mesh::MdlData::new(mdl_file),
vtx:valve_mesh::VtxData::new(vtx_file),
vvd:valve_mesh::VvdData::new(vvd_file),
});
},
_=>println!("no model name={}",mesh_path),
}
}
valve_mesh::Meshes::new(mesh_data)
}
}
pub struct Loader{
render_config_loader:RenderConfigLoader,
mesh_loader:MeshLoader,
}
impl Loader{
pub fn new()->Self{
Self{
render_config_loader:RenderConfigLoader{
texture_count:0,
texture_paths:HashMap::new(),
render_configs:Vec::new(),
},
mesh_loader:MeshLoader{mesh_paths:HashMap::new()},
}
}
pub fn get_inner_mut(&mut self)->(&mut RenderConfigLoader,&mut MeshLoader){
(&mut self.render_config_loader,&mut self.mesh_loader)
}
pub fn into_render_configs(mut self)->Result<RenderConfigs,std::io::Error>{
let mut sorted_textures=vec![None;self.render_config_loader.texture_count as usize];
for (texture_path,render_config_id) in self.render_config_loader.texture_paths{
let render_config=self.render_config_loader.render_configs.get_mut(render_config_id.get() as usize).unwrap();
if let (Some(texture_path),Some(texture_id))=(texture_path,render_config.texture){
if let Ok(mut file)=std::fs::File::open(format!("textures/{}.dds",texture_path)){
//TODO: parallel
let mut data=Vec::<u8>::new();
file.read_to_end(&mut data)?;
sorted_textures[texture_id.get() as usize]=Some(Texture::ImageDDS(data));
}else{
//texture failed to load
render_config.texture=None;
}
}
}
Ok(RenderConfigs::new(
sorted_textures,
self.render_config_loader.render_configs,
))
}
}

@ -0,0 +1,60 @@
use strafesnet_common::model::MeshId;
//duplicate this code for now
#[derive(Clone)]
pub struct MdlData(Vec<u8>);
impl MdlData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct VtxData(Vec<u8>);
impl VtxData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct VvdData(Vec<u8>);
impl VvdData{
pub const fn new(value:Vec<u8>)->Self{
Self(value)
}
pub fn get(self)->Vec<u8>{
self.0
}
}
#[derive(Clone)]
pub struct ModelData{
pub mdl:MdlData,
pub vtx:VtxData,
pub vvd:VvdData,
}
//meshes is more prone to failure
pub struct Meshes{
meshes:Vec<Option<ModelData>>,
}
impl Meshes{
pub(crate) const fn new(meshes:Vec<Option<ModelData>>)->Self{
Self{
meshes,
}
}
pub fn get_texture(&self,texture_id:MeshId)->Option<&ModelData>{
self.meshes.get(texture_id.get() as usize)?.as_ref()
}
pub fn into_iter(self)->impl Iterator<Item=(MeshId,ModelData)>{
self.meshes.into_iter().enumerate().filter_map(|(mesh_id,maybe_mesh)|
maybe_mesh.map(|mesh|(MeshId::new(mesh_id as u32),mesh))
)
}
}

@ -1,7 +1,7 @@
[package] [package]
name = "fixed_wide" name = "fixed_wide"
version = "0.2.0" version = "0.1.1"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Fixed point numbers with optional widening Mul operator." description = "Fixed point numbers with optional widening Mul operator."
@ -14,7 +14,7 @@ wide-mul=[]
zeroes=["dep:arrayvec"] zeroes=["dep:arrayvec"]
[dependencies] [dependencies]
bnum = "0.13.0" bnum = "0.12.0"
arrayvec = { version = "0.7.6", optional = true } arrayvec = { version = "0.7.6", optional = true }
paste = "1.0.15" paste = "1.0.15"
ratio_ops = { version = "0.1.0", path = "../ratio_ops", registry = "strafesnet", optional = true } ratio_ops = { path = "../ratio_ops", registry = "strafesnet", optional = true }

@ -1,6 +1,6 @@
use bnum::{BInt,cast::As}; use bnum::{BInt,cast::As};
#[derive(Clone,Copy,Debug,Default,Hash,PartialEq,Eq,PartialOrd,Ord)] #[derive(Clone,Copy,Debug,Default,Hash,PartialEq,PartialOrd,Ord)]
/// A Fixed point number for which multiply operations widen the bits in the output. (when the wide-mul feature is enabled) /// A Fixed point number for which multiply operations widen the bits in the output. (when the wide-mul feature is enabled)
/// N is the number of u64s to use /// N is the number of u64s to use
/// F is the number of fractional bits (always N*32 lol) /// F is the number of fractional bits (always N*32 lol)
@ -33,14 +33,6 @@ impl<const N:usize,const F:usize> Fixed<N,F>{
self.bits self.bits
} }
#[inline] #[inline]
pub const fn as_bits(&self)->&BInt<N>{
&self.bits
}
#[inline]
pub const fn as_bits_mut(&mut self)->&mut BInt<N>{
&mut self.bits
}
#[inline]
pub const fn raw_digit(value:i64)->Self{ pub const fn raw_digit(value:i64)->Self{
let mut digits=[0u64;N]; let mut digits=[0u64;N];
digits[0]=value.abs() as u64; digits[0]=value.abs() as u64;
@ -64,10 +56,6 @@ impl<const N:usize,const F:usize> Fixed<N,F>{
pub const fn abs(self)->Self{ pub const fn abs(self)->Self{
Self::from_bits(self.bits.abs()) Self::from_bits(self.bits.abs())
} }
#[inline]
pub const fn midpoint(self,other:Self)->Self{
Self::from_bits(self.bits.midpoint(other.bits))
}
} }
impl<const F:usize> Fixed<1,F>{ impl<const F:usize> Fixed<1,F>{
/// My old code called this function everywhere so let's provide it /// My old code called this function everywhere so let's provide it
@ -109,6 +97,7 @@ where
self.bits.eq(&other.into()) self.bits.eq(&other.into())
} }
} }
impl<const N:usize,const F:usize> Eq for Fixed<N,F>{}
impl<const N:usize,const F:usize,T> PartialOrd<T> for Fixed<N,F> impl<const N:usize,const F:usize,T> PartialOrd<T> for Fixed<N,F>
where where
@ -663,94 +652,74 @@ macro_repeated!(
1,2,3,4,5,6,7,8 1,2,3,4,5,6,7,8
); );
#[derive(Debug,Eq,PartialEq)] pub trait Fix<Out>{
pub enum NarrowError{ fn fix(self)->Out;
Overflow,
Underflow,
} }
pub trait Wrap<Output>{ macro_rules! impl_fix_rhs_lt_lhs_not_const_generic{
fn wrap(self)->Output;
}
pub trait Clamp<Output>{
fn clamp(self)->Output;
}
impl<const N:usize,const F:usize> Clamp<Fixed<N,F>> for Result<Fixed<N,F>,NarrowError>{
fn clamp(self)->Fixed<N,F>{
match self{
Ok(fixed)=>fixed,
Err(NarrowError::Overflow)=>Fixed::MAX,
Err(NarrowError::Underflow)=>Fixed::MIN,
}
}
}
macro_rules! impl_narrow_not_const_generic{
( (
(), (),
($lhs:expr,$rhs:expr) ($lhs:expr,$rhs:expr)
)=>{ )=>{
paste::item!{
impl Fixed<$lhs,{$lhs*32}> impl Fixed<$lhs,{$lhs*32}>
{ {
paste::item!{
#[inline] #[inline]
pub fn [<wrap_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32))) Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits.shr(($lhs-$rhs)*32)))
} }
#[inline]
pub fn [<narrow_ $rhs>](self)->Result<Fixed<$rhs,{$rhs*32}>,NarrowError>{
if Fixed::<$rhs,{$rhs*32}>::MAX.[<widen_ $lhs>]().bits<self.bits{
return Err(NarrowError::Overflow);
}
if self.bits<Fixed::<$rhs,{$rhs*32}>::MIN.[<widen_ $lhs>]().bits{
return Err(NarrowError::Underflow);
}
Ok(self.[<wrap_ $rhs>]())
}
#[inline]
pub fn [<clamp_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
self.[<narrow_ $rhs>]().clamp()
} }
} }
impl Wrap<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{ impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
#[inline] fn fix(self)->Fixed<$rhs,{$rhs*32}>{
fn wrap(self)->Fixed<$rhs,{$rhs*32}>{ paste::item!{
self.[<wrap_ $rhs>]() self.[<fix_ $rhs>]()
}
}
impl TryInto<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
type Error=NarrowError;
#[inline]
fn try_into(self)->Result<Fixed<$rhs,{$rhs*32}>,Self::Error>{
self.[<narrow_ $rhs>]()
}
}
impl Clamp<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
#[inline]
fn clamp(self)->Fixed<$rhs,{$rhs*32}>{
self.[<clamp_ $rhs>]()
} }
} }
} }
} }
} }
macro_rules! impl_widen_not_const_generic{ macro_rules! impl_fix_lhs_lt_rhs_not_const_generic{
( (
(), (),
($lhs:expr,$rhs:expr) ($lhs:expr,$rhs:expr)
)=>{ )=>{
paste::item!{
impl Fixed<$lhs,{$lhs*32}> impl Fixed<$lhs,{$lhs*32}>
{ {
paste::item!{
#[inline] #[inline]
pub fn [<widen_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32)) Fixed::from_bits(bnum::cast::As::as_::<BInt::<$rhs>>(self.bits).shl(($rhs-$lhs)*32))
} }
} }
impl Into<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{ }
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
}
}
}
}
}
macro_rules! impl_fix_lhs_eq_rhs_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
impl Fixed<$lhs,{$lhs*32}>
{
paste::item!{
#[inline] #[inline]
fn into(self)->Fixed<$rhs,{$rhs*32}>{ pub fn [<fix_ $rhs>](self)->Fixed<$rhs,{$rhs*32}>{
self.[<widen_ $rhs>]() self
}
}
}
impl Fix<Fixed<$rhs,{$rhs*32}>> for Fixed<$lhs,{$lhs*32}>{
fn fix(self)->Fixed<$rhs,{$rhs*32}>{
paste::item!{
self.[<fix_ $rhs>]()
} }
} }
} }
@ -760,7 +729,7 @@ macro_rules! impl_widen_not_const_generic{
// I LOVE NOT BEING ABLE TO USE CONST GENERICS // I LOVE NOT BEING ABLE TO USE CONST GENERICS
macro_repeated!( macro_repeated!(
impl_narrow_not_const_generic,(), impl_fix_rhs_lt_lhs_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1), (2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2), (3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3), (4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
@ -778,7 +747,7 @@ macro_repeated!(
(16,15) (16,15)
); );
macro_repeated!( macro_repeated!(
impl_widen_not_const_generic,(), impl_fix_lhs_lt_rhs_not_const_generic,(),
(1,2), (1,2),
(1,3),(2,3), (1,3),(2,3),
(1,4),(2,4),(3,4), (1,4),(2,4),(3,4),
@ -793,8 +762,11 @@ macro_repeated!(
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13), (1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14), (1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15), (1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16), (1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16)
(1,17) );
macro_repeated!(
impl_fix_lhs_eq_rhs_not_const_generic,(),
(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16)
); );
macro_rules! impl_not_const_generic{ macro_rules! impl_not_const_generic{
@ -814,13 +786,16 @@ macro_rules! impl_not_const_generic{
let mut result=Self::ZERO; let mut result=Self::ZERO;
//resize self to match the wide mul output //resize self to match the wide mul output
let wide_self=self.[<widen_ $_2n>](); let wide_self=self.[<fix_ $_2n>]();
//descend down the bits and check if flipping each bit would push the square over the input value //descend down the bits and check if flipping each bit would push the square over the input value
for shift in (0..=max_shift).rev(){ for shift in (0..=max_shift).rev(){
result.as_bits_mut().as_bits_mut().set_bit(shift,true); let new_result={
if wide_self<result.[<wide_mul_ $n _ $n>](result){ let mut bits=result.to_bits().to_bits();
// put it back lol bits.set_bit(shift,true);
result.as_bits_mut().as_bits_mut().set_bit(shift,false); Self::from_bits(BInt::from_bits(bits))
};
if new_result.[<wide_mul_ $n _ $n>](new_result)<=wide_self{
result=new_result;
} }
} }
result result
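The square-root hunk above builds the result one bit at a time, keeping a candidate bit only when the widened square still fits under the input. The same loop on a plain u64, with u128 used for the overflow-free comparison (a sketch of the algorithm, not the fixed-point scaling):

fn isqrt(n: u64) -> u64 {
    let mut result: u64 = 0;
    // descend from the highest possible result bit to the lowest
    for shift in (0..32).rev() {
        let candidate = result | (1 << shift);
        // compare in u128 so candidate*candidate cannot overflow
        if (candidate as u128) * (candidate as u128) <= n as u128 {
            result = candidate; // keeping this bit still fits under n
        }
    }
    result
}

fn main() {
    assert_eq!(isqrt(0), 0);
    assert_eq!(isqrt(15), 3);
    assert_eq!(isqrt(16), 4);
    assert_eq!(isqrt(u64::MAX), u32::MAX as u64);
}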

@ -57,11 +57,11 @@ fn from_f32(){
assert_eq!(b,Ok(a)); assert_eq!(b,Ok(a));
//I32F32::MIN hits a special case since it's not representable as a positive signed integer //I32F32::MIN hits a special case since it's not representable as a positive signed integer
//TODO: don't return an overflow because this is technically possible //TODO: don't return an overflow because this is technically possible
let _a=I32F32::MIN; let a=I32F32::MIN;
let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into(); let b:Result<I32F32,_>=Into::<f32>::into(I32F32::MIN).try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow)); assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
//16 is within the 24 bits of float precision //16 is within the 24 bits of float precision
let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.widen_2()).try_into(); let b:Result<I32F32,_>=Into::<f32>::into(-I32F32::MIN.fix_2()).try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow)); assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Overflow));
let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into(); let b:Result<I32F32,_>=f32::MIN_POSITIVE.try_into();
assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow)); assert_eq!(b,Err(crate::fixed::FixedFromFloatError::Underflow));
@ -136,24 +136,11 @@ fn test_bint(){
} }
#[test] #[test]
fn test_wrap(){ fn test_fix(){
assert_eq!(I32F32::ONE,I256F256::ONE.wrap_1()); assert_eq!(I32F32::ONE.fix_8(),I256F256::ONE);
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.wrap_1()); assert_eq!(I32F32::ONE,I256F256::ONE.fix_1());
} assert_eq!(I32F32::NEG_ONE.fix_8(),I256F256::NEG_ONE);
#[test] assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.fix_1());
fn test_narrow(){
assert_eq!(Ok(I32F32::ONE),I256F256::ONE.narrow_1());
assert_eq!(Ok(I32F32::NEG_ONE),I256F256::NEG_ONE.narrow_1());
}
#[test]
fn test_widen(){
assert_eq!(I32F32::ONE.widen_8(),I256F256::ONE);
assert_eq!(I32F32::NEG_ONE.widen_8(),I256F256::NEG_ONE);
}
#[test]
fn test_clamp(){
assert_eq!(I32F32::ONE,I256F256::ONE.clamp_1());
assert_eq!(I32F32::NEG_ONE,I256F256::NEG_ONE.clamp_1());
} }
#[test] #[test]
fn test_sqrt(){ fn test_sqrt(){
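
The test_fix hunk above exercises conversions between fixed-point widths (I32F32 and I256F256) while preserving values like ONE and NEG_ONE. As a rough sketch of what such a width change involves, here is the same idea on primitive integers, assuming a Q32.32 narrow type and a Q64.64 wide one; the crate itself works on arbitrary-width BInt storage and handles range checking, which this toy does not:

    // Widen a Q32.32 value (stored in i64) to Q64.64 (stored in i128) and
    // narrow it back. Widening shifts the raw bits left so the binary point
    // moves; narrowing shifts right and simply truncates the extra precision.
    fn widen_q32_32_to_q64_64(x: i64) -> i128 {
        (x as i128) << 32
    }
    fn narrow_q64_64_to_q32_32(x: i128) -> i64 {
        (x >> 32) as i64 // truncating; a checked version would test the range first
    }

    fn main() {
        let one_narrow: i64 = 1i64 << 32;   // 1.0 in Q32.32
        let one_wide: i128 = 1i128 << 64;   // 1.0 in Q64.64
        assert_eq!(widen_q32_32_to_q64_64(one_narrow), one_wide);
        assert_eq!(narrow_q64_64_to_q32_32(one_wide), one_narrow);
        assert_eq!(widen_q32_32_to_q64_64(-one_narrow), -one_wide);
    }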

@ -15,10 +15,8 @@ macro_rules! impl_zeroes{
let radicand=a1*a1-a2*a0*4; let radicand=a1*a1-a2*a0*4;
match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){ match radicand.cmp(&<Self as core::ops::Mul>::Output::ZERO){
Ordering::Greater=>{ Ordering::Greater=>{
// using wrap because sqrt always halves the number of leading digits.
// clamp would be more defensive, but is slower.
paste::item!{ paste::item!{
let planar_radicand=radicand.sqrt().[<wrap_ $n>](); let planar_radicand=radicand.sqrt().[<fix_ $n>]();
} }
//sort roots ascending and avoid taking the difference of large numbers //sort roots ascending and avoid taking the difference of large numbers
let zeroes=match (a2pos,Self::ZERO<a1){ let zeroes=match (a2pos,Self::ZERO<a1){
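
The impl_zeroes hunk above mentions sorting the roots ascending and avoiding the difference of large numbers; that is the usual numerically stable quadratic formula, where the root that would cancel is recovered from the product of the roots instead. A floating-point sketch of that idea (the crate's version works in fixed point and defers the division):

    use std::cmp::Ordering;

    // Stable real roots of a2*x^2 + a1*x + a0 = 0, assuming a2 != 0.
    // q = -(a1 + sign(a1)*sqrt(d))/2 never subtracts nearly equal numbers,
    // and the second root comes from x0*x1 = a0/a2.
    fn stable_roots(a2: f64, a1: f64, a0: f64) -> Option<(f64, f64)> {
        let d = a1 * a1 - 4.0 * a2 * a0;
        match d.partial_cmp(&0.0)? {
            Ordering::Greater => {
                let q = -0.5 * (a1 + a1.signum() * d.sqrt());
                let (r0, r1) = (q / a2, a0 / q);
                // sort ascending
                Some(if r0 < r1 { (r0, r1) } else { (r1, r0) })
            }
            Ordering::Equal => {
                let r = -0.5 * a1 / a2;
                Some((r, r))
            }
            Ordering::Less => None,
        }
    }

    fn main() {
        // x^2 - 3x + 2 = 0 has roots 1 and 2
        let (x0, x1) = stable_roots(1.0, -3.0, 2.0).unwrap();
        assert!((x0 - 1.0).abs() < 1e-12 && (x1 - 2.0).abs() < 1e-12);
    }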

@ -1,7 +1,7 @@
[package] [package]
name = "linear_ops" name = "linear_ops"
version = "0.1.1" version = "0.1.0"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Vector/Matrix operations using trait bounds." description = "Vector/Matrix operations using trait bounds."
@ -14,8 +14,8 @@ fixed-wide=["dep:fixed_wide","dep:paste"]
deferred-division=["dep:ratio_ops"] deferred-division=["dep:ratio_ops"]
[dependencies] [dependencies]
ratio_ops = { version = "0.1.0", path = "../ratio_ops", registry = "strafesnet", optional = true } ratio_ops = { path = "../ratio_ops", registry = "strafesnet", optional = true }
fixed_wide = { version = "0.2.0", path = "../fixed_wide", registry = "strafesnet", optional = true } fixed_wide = { path = "../fixed_wide", registry = "strafesnet", optional = true }
paste = { version = "1.0.15", optional = true } paste = { version = "1.0.15", optional = true }
[dev-dependencies] [dev-dependencies]

@ -38,95 +38,40 @@ macro_rules! impl_fixed_wide_vector {
$crate::macro_4!(impl_fixed_wide_vector_not_const_generic,()); $crate::macro_4!(impl_fixed_wide_vector_not_const_generic,());
// I LOVE NOT BEING ABLE TO USE CONST GENERICS // I LOVE NOT BEING ABLE TO USE CONST GENERICS
$crate::macro_repeated!( $crate::macro_repeated!(
impl_narrow_not_const_generic,(), impl_fix_not_const_generic,(),
(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),(17,1), (1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1),(10,1),(11,1),(12,1),(13,1),(14,1),(15,1),(16,1),
(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2), (1,2),(2,2),(3,2),(4,2),(5,2),(6,2),(7,2),(8,2),(9,2),(10,2),(11,2),(12,2),(13,2),(14,2),(15,2),(16,2),
(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3), (1,3),(2,3),(3,3),(4,3),(5,3),(6,3),(7,3),(8,3),(9,3),(10,3),(11,3),(12,3),(13,3),(14,3),(15,3),(16,3),
(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4), (1,4),(2,4),(3,4),(4,4),(5,4),(6,4),(7,4),(8,4),(9,4),(10,4),(11,4),(12,4),(13,4),(14,4),(15,4),(16,4),
(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5), (1,5),(2,5),(3,5),(4,5),(5,5),(6,5),(7,5),(8,5),(9,5),(10,5),(11,5),(12,5),(13,5),(14,5),(15,5),(16,5),
(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6), (1,6),(2,6),(3,6),(4,6),(5,6),(6,6),(7,6),(8,6),(9,6),(10,6),(11,6),(12,6),(13,6),(14,6),(15,6),(16,6),
(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7), (1,7),(2,7),(3,7),(4,7),(5,7),(6,7),(7,7),(8,7),(9,7),(10,7),(11,7),(12,7),(13,7),(14,7),(15,7),(16,7),
(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8), (1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),(8,8),(9,8),(10,8),(11,8),(12,8),(13,8),(14,8),(15,8),(16,8),
(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9), (1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),(9,9),(10,9),(11,9),(12,9),(13,9),(14,9),(15,9),(16,9),
(11,10),(12,10),(13,10),(14,10),(15,10),(16,10), (1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),(10,10),(11,10),(12,10),(13,10),(14,10),(15,10),(16,10),
(12,11),(13,11),(14,11),(15,11),(16,11), (1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),(11,11),(12,11),(13,11),(14,11),(15,11),(16,11),
(13,12),(14,12),(15,12),(16,12), (1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),(12,12),(13,12),(14,12),(15,12),(16,12),
(14,13),(15,13),(16,13), (1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),(13,13),(14,13),(15,13),(16,13),
(15,14),(16,14), (1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),(14,14),(15,14),(16,14),
(16,15) (1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),(15,15),(16,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),(16,16)
); );
$crate::macro_repeated!(
impl_widen_not_const_generic,(),
(1,2),
(1,3),(2,3),
(1,4),(2,4),(3,4),
(1,5),(2,5),(3,5),(4,5),
(1,6),(2,6),(3,6),(4,6),(5,6),
(1,7),(2,7),(3,7),(4,7),(5,7),(6,7),
(1,8),(2,8),(3,8),(4,8),(5,8),(6,8),(7,8),
(1,9),(2,9),(3,9),(4,9),(5,9),(6,9),(7,9),(8,9),
(1,10),(2,10),(3,10),(4,10),(5,10),(6,10),(7,10),(8,10),(9,10),
(1,11),(2,11),(3,11),(4,11),(5,11),(6,11),(7,11),(8,11),(9,11),(10,11),
(1,12),(2,12),(3,12),(4,12),(5,12),(6,12),(7,12),(8,12),(9,12),(10,12),(11,12),
(1,13),(2,13),(3,13),(4,13),(5,13),(6,13),(7,13),(8,13),(9,13),(10,13),(11,13),(12,13),
(1,14),(2,14),(3,14),(4,14),(5,14),(6,14),(7,14),(8,14),(9,14),(10,14),(11,14),(12,14),(13,14),
(1,15),(2,15),(3,15),(4,15),(5,15),(6,15),(7,15),(8,15),(9,15),(10,15),(11,15),(12,15),(13,15),(14,15),
(1,16),(2,16),(3,16),(4,16),(5,16),(6,16),(7,16),(8,16),(9,16),(10,16),(11,16),(12,16),(13,16),(14,16),(15,16),
(1,17)
);
impl<const N:usize,T:fixed_wide::fixed::Wrap<U>,U> fixed_wide::fixed::Wrap<Vector<N,U>> for Vector<N,T>
{
#[inline]
fn wrap(self)->Vector<N,U>{
self.map(|t|t.wrap())
}
}
impl<const N:usize,T:fixed_wide::fixed::Clamp<U>,U> fixed_wide::fixed::Clamp<Vector<N,U>> for Vector<N,T>
{
#[inline]
fn clamp(self)->Vector<N,U>{
self.map(|t|t.clamp())
}
}
}; };
} }
#[doc(hidden)] #[doc(hidden)]
#[macro_export(local_inner_macros)] #[macro_export(local_inner_macros)]
macro_rules! impl_narrow_not_const_generic{ macro_rules! impl_fix_not_const_generic{
( (
(), (),
($lhs:expr,$rhs:expr) ($lhs:expr,$rhs:expr)
)=>{ )=>{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>
{
paste::item!{ paste::item!{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
#[inline] #[inline]
pub fn [<wrap_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{ pub fn [<fix_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
self.map(|t|t.[<wrap_ $rhs>]()) self.map(|t|t.[<fix_ $rhs>]())
}
#[inline]
pub fn [<narrow_ $rhs>](self)->Vector<N,Result<fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>,fixed_wide::fixed::NarrowError>>{
self.map(|t|t.[<narrow_ $rhs>]())
}
#[inline]
pub fn [<clamp_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
self.map(|t|t.[<clamp_ $rhs>]())
}
}
}
}
}
#[doc(hidden)]
#[macro_export(local_inner_macros)]
macro_rules! impl_widen_not_const_generic{
(
(),
($lhs:expr,$rhs:expr)
)=>{
paste::item!{
impl<const N:usize> Vector<N,fixed_wide::fixed::Fixed<$lhs,{$lhs*32}>>{
#[inline]
pub fn [<widen_ $rhs>](self)->Vector<N,fixed_wide::fixed::Fixed<$rhs,{$rhs*32}>>{
self.map(|t|t.[<widen_ $rhs>]())
} }
} }
} }
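
Each generated fix_N method in the macro above just lifts the scalar conversion onto Vector component-wise through map; the macro repetition exists only because the width arithmetic cannot be expressed with const generics. A self-contained toy sketch of that lifting pattern (not the crate's Vector):

    // A toy fixed-size vector with an element-wise map, mirroring how a scalar
    // conversion method is lifted onto every component.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Vec3<T>([T; 3]);

    impl<T> Vec3<T> {
        fn map<U, F: FnMut(T) -> U>(self, f: F) -> Vec3<U> {
            Vec3(self.0.map(f))
        }
    }

    impl Vec3<i64> {
        // stand-in for a generated fix_2-style widening of each component
        fn widen(self) -> Vec3<i128> {
            self.map(|t| (t as i128) << 32)
        }
    }

    fn main() {
        let v = Vec3([1i64 << 32, 0, -(1i64 << 32)]); // 1.0, 0.0, -1.0 in Q32.32
        assert_eq!(v.widen(), Vec3([1i128 << 64, 0, -(1i128 << 64)]));
    }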

@ -58,15 +58,6 @@ macro_rules! impl_vector {
} }
} }
impl<const N:usize,T,E:std::fmt::Debug> Vector<N,Result<T,E>>{
#[inline]
pub fn unwrap(self)->Vector<N,T>{
Vector{
array:self.array.map(Result::unwrap)
}
}
}
impl<const N:usize,T:Ord> Vector<N,T>{ impl<const N:usize,T:Ord> Vector<N,T>{
#[inline] #[inline]
pub fn min(self,rhs:Self)->Self{ pub fn min(self,rhs:Self)->Self{

@ -1,6 +1,5 @@
use crate::vector::Vector; use crate::vector::Vector;
#[repr(transparent)]
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)] #[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct Matrix<const X:usize,const Y:usize,T>{ pub struct Matrix<const X:usize,const Y:usize,T>{
pub(crate) array:[[T;Y];X], pub(crate) array:[[T;Y];X],

@ -3,7 +3,6 @@
/// v.x += v.z; /// v.x += v.z;
/// println!("v.x={}",v.x); /// println!("v.x={}",v.x);
#[repr(transparent)]
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)] #[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct Vector<const N:usize,T>{ pub struct Vector<const N:usize,T>{
pub(crate) array:[T;N], pub(crate) array:[T;N],

@ -1,7 +1,7 @@
[package] [package]
name = "ratio_ops" name = "ratio_ops"
version = "0.1.1" version = "0.1.0"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Ratio operations using trait bounds for avoiding division like the plague." description = "Ratio operations using trait bounds for avoiding division like the plague."

@ -268,35 +268,30 @@ impl<LhsNum,LhsDen,RhsNum,RhsDen,T,U> PartialEq<Ratio<RhsNum,RhsDen>> for Ratio<
} }
impl<Num,Den> Eq for Ratio<Num,Den> where Self:PartialEq{} impl<Num,Den> Eq for Ratio<Num,Den> where Self:PartialEq{}
// Wow! These were both completely wrong!
// Idea: use a 'signed' trait instead of parity and float the sign to the numerator.
impl<LhsNum,LhsDen,RhsNum,RhsDen,T,U> PartialOrd<Ratio<RhsNum,RhsDen>> for Ratio<LhsNum,LhsDen> impl<LhsNum,LhsDen,RhsNum,RhsDen,T,U> PartialOrd<Ratio<RhsNum,RhsDen>> for Ratio<LhsNum,LhsDen>
where where
LhsNum:Copy, LhsNum:Copy,
LhsDen:Copy+Parity, LhsDen:Copy,
RhsNum:Copy, RhsNum:Copy,
RhsDen:Copy+Parity, RhsDen:Copy,
LhsNum:core::ops::Mul<RhsDen,Output=T>, LhsNum:core::ops::Mul<RhsDen,Output=T>,
LhsDen:core::ops::Mul<RhsNum,Output=T>,
RhsNum:core::ops::Mul<LhsDen,Output=U>, RhsNum:core::ops::Mul<LhsDen,Output=U>,
RhsDen:core::ops::Mul<LhsNum,Output=U>, T:PartialOrd<U>,
T:PartialOrd<U>+Ord,
{ {
#[inline] #[inline]
fn partial_cmp(&self,&other:&Ratio<RhsNum,RhsDen>)->Option<core::cmp::Ordering>{ fn partial_cmp(&self,other:&Ratio<RhsNum,RhsDen>)->Option<core::cmp::Ordering>{
self.partial_cmp_ratio(other) (self.num*other.den).partial_cmp(&(other.num*self.den))
} }
} }
impl<Num,Den,T> Ord for Ratio<Num,Den> impl<Num,Den,T> Ord for Ratio<Num,Den>
where where
Num:Copy, Num:Copy,
Den:Copy+Parity, Den:Copy,
Num:core::ops::Mul<Den,Output=T>, Num:core::ops::Mul<Den,Output=T>,
Den:core::ops::Mul<Num,Output=T>,
T:Ord, T:Ord,
{ {
#[inline] #[inline]
fn cmp(&self,&other:&Self)->std::cmp::Ordering{ fn cmp(&self,other:&Self)->std::cmp::Ordering{
self.cmp_ratio(other) (self.num*other.den).cmp(&(other.num*self.den))
} }
} }
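
The Ratio comparison hunk above compares a/b with c/d by cross-multiplying. The subtlety a sign/parity bound on the denominators has to address is that multiplying both sides by b*d flips the ordering whenever b*d is negative. A small standalone sketch over plain integers, assuming nonzero denominators (not the crate's trait-based version):

    use std::cmp::Ordering;

    // Compare a/b with c/d without dividing: a/b < c/d  <=>  a*d < c*b
    // when b*d > 0, and the ordering flips when b*d < 0.
    fn cmp_ratio(a: i64, b: i64, c: i64, d: i64) -> Ordering {
        assert!(b != 0 && d != 0);
        let lhs = (a as i128) * (d as i128);
        let rhs = (c as i128) * (b as i128);
        if (b as i128) * (d as i128) > 0 {
            lhs.cmp(&rhs)
        } else {
            rhs.cmp(&lhs)
        }
    }

    fn main() {
        // 1/2 < 2/3
        assert_eq!(cmp_ratio(1, 2, 2, 3), Ordering::Less);
        // 1/-2 (= -0.5) > -2/3
        assert_eq!(cmp_ratio(1, -2, -2, 3), Ordering::Greater);
    }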

@ -1,7 +1,7 @@
[package] [package]
name = "strafesnet_rbx_loader" name = "strafesnet_rbx_loader"
version = "0.6.0" version = "0.5.2"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Convert Roblox place and model files to StrafesNET data structures." description = "Convert Roblox place and model files to StrafesNET data structures."
@ -11,14 +11,12 @@ authors = ["Rhys Lloyd <krakow20@gmail.com>"]
[dependencies] [dependencies]
bytemuck = "1.14.3" bytemuck = "1.14.3"
glam = "0.30.0" glam = "0.29.0"
lazy-regex = "3.1.0" lazy-regex = "3.1.0"
rbx_binary = { version = "0.7.4", registry = "strafesnet" } rbx_binary = { version = "0.7.4", registry = "strafesnet" }
rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" } rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" }
rbx_mesh = "0.3.1" rbx_mesh = "0.1.2"
rbx_reflection_database = { version = "0.2.10", registry = "strafesnet" } rbx_reflection_database = { version = "0.2.10", registry = "strafesnet" }
rbx_xml = { version = "0.13.3", registry = "strafesnet" } rbx_xml = { version = "0.13.3", registry = "strafesnet" }
rbxassetid = { version = "0.1.0", path = "../rbxassetid", registry = "strafesnet" } roblox_emulator = { path = "../roblox_emulator", registry = "strafesnet" }
roblox_emulator = { version = "0.4.7", path = "../roblox_emulator", registry = "strafesnet" } strafesnet_common = { path = "../common", registry = "strafesnet" }
strafesnet_common = { version = "0.6.0", path = "../common", registry = "strafesnet" }
strafesnet_deferred_loader = { version = "0.5.0", path = "../deferred_loader", registry = "strafesnet" }

@ -1,11 +1,8 @@
use std::io::Read; use std::io::Read;
use rbx_dom_weak::WeakDom; use rbx_dom_weak::WeakDom;
use strafesnet_deferred_loader::deferred_loader::{LoadFailureMode,MeshDeferredLoader,RenderConfigDeferredLoader};
mod rbx; mod rbx;
mod mesh; mod mesh;
mod union;
pub mod loader;
mod primitives; mod primitives;
pub mod data{ pub mod data{
@ -33,9 +30,6 @@ impl Model{
let services=context.convert_into_place(); let services=context.convert_into_place();
Place{dom,services} Place{dom,services}
} }
pub fn to_snf(&self,failure_mode:LoadFailureMode)->Result<strafesnet_common::map::CompleteMap,LoadError>{
to_snf(self,failure_mode)
}
} }
impl AsRef<WeakDom> for Model{ impl AsRef<WeakDom> for Model{
fn as_ref(&self)->&WeakDom{ fn as_ref(&self)->&WeakDom{
@ -48,7 +42,7 @@ pub struct Place{
services:roblox_emulator::context::Services, services:roblox_emulator::context::Services,
} }
impl Place{ impl Place{
pub fn new(dom:WeakDom)->Option<Self>{ fn new(dom:WeakDom)->Option<Self>{
let context=roblox_emulator::context::Context::from_ref(&dom); let context=roblox_emulator::context::Context::from_ref(&dom);
Some(Self{ Some(Self{
services:context.find_services()?, services:context.find_services()?,
@ -67,9 +61,6 @@ impl Place{
} }
} }
} }
pub fn to_snf(&self,failure_mode:LoadFailureMode)->Result<strafesnet_common::map::CompleteMap,LoadError>{
to_snf(self,failure_mode)
}
} }
impl AsRef<WeakDom> for Place{ impl AsRef<WeakDom> for Place{
fn as_ref(&self)->&WeakDom{ fn as_ref(&self)->&WeakDom{
@ -101,49 +92,16 @@ pub fn read<R:Read>(input:R)->Result<Model,ReadError>{
} }
} }
#[derive(Debug)] //ConvertError
pub enum LoadError{
Texture(loader::TextureError), pub fn convert<AcquireRenderConfigId,AcquireMeshId>(
Mesh(loader::MeshError), dom:impl AsRef<WeakDom>,
} acquire_render_config_id:AcquireRenderConfigId,
impl std::fmt::Display for LoadError{ acquire_mesh_id:AcquireMeshId
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{ )->rbx::PartialMap1
write!(f,"{self:?}") where
} AcquireRenderConfigId:FnMut(Option<&str>)->strafesnet_common::model::RenderConfigId,
} AcquireMeshId:FnMut(&str)->strafesnet_common::model::MeshId,
impl std::error::Error for LoadError{} {
impl From<loader::TextureError> for LoadError{ rbx::convert(&dom.as_ref(),acquire_render_config_id,acquire_mesh_id)
fn from(value:loader::TextureError)->Self{
Self::Texture(value)
}
}
impl From<loader::MeshError> for LoadError{
fn from(value:loader::MeshError)->Self{
Self::Mesh(value)
}
}
fn to_snf(dom:impl AsRef<WeakDom>,failure_mode:LoadFailureMode)->Result<strafesnet_common::map::CompleteMap,LoadError>{
let dom=dom.as_ref();
let mut texture_deferred_loader=RenderConfigDeferredLoader::new();
let mut mesh_deferred_loader=MeshDeferredLoader::new();
let map_step1=rbx::convert(
dom,
&mut texture_deferred_loader,
&mut mesh_deferred_loader,
);
let mut mesh_loader=loader::MeshLoader::new();
let meshpart_meshes=mesh_deferred_loader.into_meshes(&mut mesh_loader,failure_mode).map_err(LoadError::Mesh)?;
let map_step2=map_step1.add_meshpart_meshes_and_calculate_attributes(meshpart_meshes);
let mut texture_loader=loader::TextureLoader::new();
let render_configs=texture_deferred_loader.into_render_configs(&mut texture_loader,failure_mode).map_err(LoadError::Texture)?;
let map=map_step2.add_render_configs_and_textures(render_configs);
Ok(map)
} }
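
The to_snf hunk above wires up a deferred-loading shape: the rbx::convert pass only registers mesh and texture indices, then a loader resolves everything afterwards and the partial map is completed in two steps. A self-contained toy sketch of that pattern (the real MeshDeferredLoader / RenderConfigDeferredLoader in strafesnet_deferred_loader are more involved, handling failure modes and typed ids):

    use std::collections::HashMap;

    // Minimal deferred loader: conversion code calls `defer` and gets a small
    // id back; the expensive `Loader::load` calls all happen later in `resolve`.
    trait Loader {
        type Index;
        type Resource;
        type Error;
        fn load(&mut self, index: Self::Index) -> Result<Self::Resource, Self::Error>;
    }

    struct DeferredLoader<I> {
        indices: Vec<I>,
    }
    impl<I> DeferredLoader<I> {
        fn new() -> Self {
            Self { indices: Vec::new() }
        }
        fn defer(&mut self, index: I) -> usize {
            self.indices.push(index);
            self.indices.len() - 1
        }
        fn resolve<L>(self, loader: &mut L) -> Result<Vec<L::Resource>, L::Error>
        where
            L: Loader<Index = I>,
        {
            self.indices.into_iter().map(|i| loader.load(i)).collect()
        }
    }

    // A loader that "loads" by looking strings up in a map.
    struct MapLoader(HashMap<&'static str, u32>);
    impl Loader for MapLoader {
        type Index = &'static str;
        type Resource = u32;
        type Error = String;
        fn load(&mut self, index: &'static str) -> Result<u32, String> {
            self.0.get(index).copied().ok_or_else(|| format!("missing {index}"))
        }
    }

    fn main() {
        let mut deferred = DeferredLoader::new();
        let a = deferred.defer("textures/1.dds");
        let b = deferred.defer("textures/2.dds");
        let mut loader = MapLoader(HashMap::from([("textures/1.dds", 10), ("textures/2.dds", 20)]));
        let resources = deferred.resolve(&mut loader).unwrap();
        assert_eq!((resources[a], resources[b]), (10, 20));
    }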

@ -1,191 +0,0 @@
use std::io::Read;
use rbxassetid::{RobloxAssetId,RobloxAssetIdParseErr};
use strafesnet_common::model::Mesh;
use strafesnet_deferred_loader::{loader::Loader,texture::Texture};
use crate::data::RobloxMeshBytes;
use crate::rbx::RobloxPartDescription;
fn read_entire_file(path:impl AsRef<std::path::Path>)->Result<Vec<u8>,std::io::Error>{
let mut file=std::fs::File::open(path)?;
let mut data=Vec::new();
file.read_to_end(&mut data)?;
Ok(data)
}
#[allow(dead_code)]
#[derive(Debug)]
pub enum TextureError{
Io(std::io::Error),
RobloxAssetIdParse(RobloxAssetIdParseErr),
}
impl std::fmt::Display for TextureError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for TextureError{}
impl From<std::io::Error> for TextureError{
fn from(value:std::io::Error)->Self{
Self::Io(value)
}
}
impl From<RobloxAssetIdParseErr> for TextureError{
fn from(value:RobloxAssetIdParseErr)->Self{
Self::RobloxAssetIdParse(value)
}
}
pub struct TextureLoader<'a>(std::marker::PhantomData<&'a ()>);
impl TextureLoader<'_>{
pub fn new()->Self{
Self(std::marker::PhantomData)
}
}
impl<'a> Loader for TextureLoader<'a>{
type Error=TextureError;
type Index=&'a str;
type Resource=Texture;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>{
let RobloxAssetId(asset_id)=index.parse()?;
let file_name=format!("textures/{}.dds",asset_id);
let data=read_entire_file(file_name)?;
Ok(Texture::ImageDDS(data))
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub enum MeshError{
Io(std::io::Error),
RobloxAssetIdParse(RobloxAssetIdParseErr),
Mesh(crate::mesh::Error),
Union(crate::union::Error),
DecodeBinary(rbx_binary::DecodeError),
OneChildPolicy,
MissingInstance,
}
impl std::fmt::Display for MeshError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for MeshError{}
impl From<std::io::Error> for MeshError{
fn from(value:std::io::Error)->Self{
Self::Io(value)
}
}
impl From<RobloxAssetIdParseErr> for MeshError{
fn from(value:RobloxAssetIdParseErr)->Self{
Self::RobloxAssetIdParse(value)
}
}
impl From<crate::mesh::Error> for MeshError{
fn from(value:crate::mesh::Error)->Self{
Self::Mesh(value)
}
}
impl From<crate::union::Error> for MeshError{
fn from(value:crate::union::Error)->Self{
Self::Union(value)
}
}
impl From<rbx_binary::DecodeError> for MeshError{
fn from(value:rbx_binary::DecodeError)->Self{
Self::DecodeBinary(value)
}
}
#[derive(Hash,Eq,PartialEq)]
pub enum MeshType<'a>{
FileMesh,
Union{
mesh_data:&'a [u8],
physics_data:&'a [u8],
size_float_bits:[u32;3],
part_texture_description:RobloxPartDescription,
},
}
#[derive(Hash,Eq,PartialEq)]
pub struct MeshIndex<'a>{
mesh_type:MeshType<'a>,
content:&'a str,
}
impl MeshIndex<'_>{
pub fn file_mesh(content:&str)->MeshIndex{
MeshIndex{
mesh_type:MeshType::FileMesh,
content,
}
}
pub fn union<'a>(
content:&'a str,
mesh_data:&'a [u8],
physics_data:&'a [u8],
size:&rbx_dom_weak::types::Vector3,
part_texture_description:RobloxPartDescription,
)->MeshIndex<'a>{
MeshIndex{
mesh_type:MeshType::Union{
mesh_data,
physics_data,
size_float_bits:[size.x.to_bits(),size.y.to_bits(),size.z.to_bits()],
part_texture_description,
},
content,
}
}
}
pub struct MeshLoader<'a>(std::marker::PhantomData<&'a ()>);
impl MeshLoader<'_>{
pub fn new()->Self{
Self(std::marker::PhantomData)
}
}
impl<'a> Loader for MeshLoader<'a>{
type Error=MeshError;
type Index=MeshIndex<'a>;
type Resource=Mesh;
fn load(&mut self,index:Self::Index)->Result<Self::Resource,Self::Error>{
let mesh=match index.mesh_type{
MeshType::FileMesh=>{
let RobloxAssetId(asset_id)=index.content.parse()?;
let file_name=format!("meshes/{}",asset_id);
let data=read_entire_file(file_name)?;
crate::mesh::convert(RobloxMeshBytes::new(data))?
},
MeshType::Union{mut physics_data,mut mesh_data,size_float_bits,part_texture_description}=>{
// decode asset
let size=glam::Vec3::from_array(size_float_bits.map(f32::from_bits));
if !index.content.is_empty()&&(physics_data.is_empty()||mesh_data.is_empty()){
let RobloxAssetId(asset_id)=index.content.parse()?;
let file_name=format!("unions/{}",asset_id);
let data=read_entire_file(file_name)?;
let dom=rbx_binary::from_reader(std::io::Cursor::new(data))?;
let &[referent]=dom.root().children()else{
return Err(MeshError::OneChildPolicy);
};
let Some(instance)=dom.get_by_ref(referent)else{
return Err(MeshError::MissingInstance);
};
if physics_data.is_empty(){
if let Some(rbx_dom_weak::types::Variant::BinaryString(data))=instance.properties.get("PhysicsData"){
physics_data=data.as_ref();
}
}
if mesh_data.is_empty(){
if let Some(rbx_dom_weak::types::Variant::BinaryString(data))=instance.properties.get("MeshData"){
mesh_data=data.as_ref();
}
}
crate::union::convert(physics_data,mesh_data,size,part_texture_description)?
}else{
crate::union::convert(physics_data,mesh_data,size,part_texture_description)?
}
},
};
Ok(mesh)
}
}
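
One detail worth noting in the loader.rs hunk above: MeshIndex stores the part size as [u32;3] bit patterns (size.x.to_bits() and friends) so the whole index can derive Hash and Eq, which f32 itself cannot. A tiny standalone sketch of that trick:

    use std::collections::HashSet;

    // f32 is neither Eq nor Hash, but its bit pattern is, so keys that need
    // to include a float can store f32::to_bits and convert back on use.
    #[derive(Hash, PartialEq, Eq)]
    struct SizeKey([u32; 3]);

    impl SizeKey {
        fn new(size: [f32; 3]) -> Self {
            Self(size.map(f32::to_bits))
        }
        fn size(&self) -> [f32; 3] {
            self.0.map(f32::from_bits)
        }
    }

    fn main() {
        let mut seen = HashSet::new();
        assert!(seen.insert(SizeKey::new([1.0, 2.0, 4.0])));
        // the same size hashes to the same key
        assert!(!seen.insert(SizeKey::new([1.0, 2.0, 4.0])));
        assert_eq!(SizeKey::new([1.0, 2.0, 4.0]).size(), [1.0, 2.0, 4.0]);
    }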

@ -1,9 +1,8 @@
use std::collections::HashMap; use std::collections::HashMap;
use rbx_mesh::mesh::{Vertex2,Vertex2Truncated}; use rbx_mesh::mesh::{Vertex2, Vertex2Truncated};
use strafesnet_common::{integer::vec3,model::{self,ColorId,IndexedVertex,NormalId,PolygonGroup,PolygonList,PositionId,RenderConfigId,TextureCoordinateId,VertexId}}; use strafesnet_common::{integer::vec3,model::{self, ColorId, IndexedVertex, NormalId, PolygonGroup, PolygonList, PositionId, TextureCoordinateId, VertexId}};
#[allow(dead_code)]
#[derive(Debug)] #[derive(Debug)]
pub enum Error{ pub enum Error{
Planar64Vec3(strafesnet_common::integer::Planar64TryFromFloatError), Planar64Vec3(strafesnet_common::integer::Planar64TryFromFloatError),
@ -84,13 +83,13 @@ where
fn ingest_faces2_lods3( fn ingest_faces2_lods3(
polygon_groups:&mut Vec<PolygonGroup>, polygon_groups:&mut Vec<PolygonGroup>,
vertex_id_map:&HashMap<rbx_mesh::mesh::VertexId2,VertexId>, vertex_id_map:&HashMap<rbx_mesh::mesh::VertexId2,VertexId>,
faces:&[rbx_mesh::mesh::Face2], faces:&Vec<rbx_mesh::mesh::Face2>,
lods:&[rbx_mesh::mesh::Lod3], lods:&Vec<rbx_mesh::mesh::Lod3>
){ ){
//faces have to be split into polygon groups based on lod //faces have to be split into polygon groups based on lod
polygon_groups.extend(lods.windows(2).map(|lod_pair| polygon_groups.extend(lods.windows(2).map(|lod_pair|
PolygonGroup::PolygonList(PolygonList::new(faces[lod_pair[0].0 as usize..lod_pair[1].0 as usize].iter().map(|rbx_mesh::mesh::Face2(v0,v1,v2)| PolygonGroup::PolygonList(PolygonList::new(faces[lod_pair[0].0 as usize..lod_pair[1].0 as usize].iter().map(|face|
vec![vertex_id_map[&v0],vertex_id_map[&v1],vertex_id_map[&v2]] vec![vertex_id_map[&face.0],vertex_id_map[&face.1],vertex_id_map[&face.2]]
).collect())) ).collect()))
)) ))
} }
@ -205,13 +204,7 @@ pub fn convert(roblox_mesh_bytes:crate::data::RobloxMeshBytes)->Result<model::Me
unique_vertices, unique_vertices,
polygon_groups, polygon_groups,
//these should probably be moved to the model... //these should probably be moved to the model...
//but what if models want to use the same texture graphics_groups:Vec::new(),
graphics_groups:vec![model::IndexedGraphicsGroup{
render:RenderConfigId::new(0),
//the lowest lod is highest quality
groups:vec![model::PolygonGroupId::new(0)]
}],
//disable physics
physics_groups:Vec::new(), physics_groups:Vec::new(),
}) })
} }

@ -1,6 +1,5 @@
use crate::rbx::{RobloxPartDescription,RobloxWedgeDescription,RobloxCornerWedgeDescription}; use strafesnet_common::model::{Color4,TextureCoordinate,Mesh,IndexedGraphicsGroup,IndexedPhysicsGroup,IndexedVertex,PolygonGroupId,PolygonGroup,PolygonList,IndexedVertexList,PositionId,TextureCoordinateId,NormalId,ColorId,VertexId,RenderConfigId};
use strafesnet_common::model::{Color4,TextureCoordinate,Mesh,MeshBuilder,IndexedGraphicsGroup,IndexedPhysicsGroup,IndexedVertex,PolygonGroupId,PolygonGroup,PolygonList,PositionId,TextureCoordinateId,NormalId,ColorId,VertexId,RenderConfigId}; use strafesnet_common::integer::{vec3,Planar64Vec3};
use strafesnet_common::integer::{vec3,Planar64,Planar64Vec3};
#[derive(Debug)] #[derive(Debug)]
pub enum Primitives{ pub enum Primitives{
@ -10,22 +9,7 @@ pub enum Primitives{
Wedge, Wedge,
CornerWedge, CornerWedge,
} }
#[derive(Debug)] #[derive(Hash,PartialEq,Eq)]
pub struct PrimitivesError;
impl TryFrom<u32> for Primitives{
type Error=PrimitivesError;
fn try_from(value:u32)->Result<Self,Self::Error>{
match value{
0=>Ok(Primitives::Sphere),
1=>Ok(Primitives::Cube),
2=>Ok(Primitives::Cylinder),
3=>Ok(Primitives::Wedge),
4=>Ok(Primitives::CornerWedge),
_=>Err(PrimitivesError),
}
}
}
#[derive(Clone,Copy,Hash,PartialEq,Eq)]
pub enum CubeFace{ pub enum CubeFace{
Right, Right,
Top, Top,
@ -34,22 +18,6 @@ pub enum CubeFace{
Bottom, Bottom,
Front, Front,
} }
#[derive(Debug)]
pub struct CubeFaceError;
impl TryFrom<u32> for CubeFace{
type Error=CubeFaceError;
fn try_from(value:u32)->Result<Self,Self::Error>{
match value{
0=>Ok(CubeFace::Right),
1=>Ok(CubeFace::Top),
2=>Ok(CubeFace::Back),
3=>Ok(CubeFace::Left),
4=>Ok(CubeFace::Bottom),
5=>Ok(CubeFace::Front),
_=>Err(CubeFaceError),
}
}
}
const CUBE_DEFAULT_TEXTURE_COORDS:[TextureCoordinate;4]=[ const CUBE_DEFAULT_TEXTURE_COORDS:[TextureCoordinate;4]=[
TextureCoordinate::new(0.0,0.0), TextureCoordinate::new(0.0,0.0),
TextureCoordinate::new(1.0,0.0), TextureCoordinate::new(1.0,0.0),
@ -74,36 +42,154 @@ const CUBE_DEFAULT_NORMALS:[Planar64Vec3;6]=[
vec3::int( 0,-1, 0),//CubeFace::Bottom vec3::int( 0,-1, 0),//CubeFace::Bottom
vec3::int( 0, 0,-1),//CubeFace::Front vec3::int( 0, 0,-1),//CubeFace::Front
]; ];
const CUBE_DEFAULT_POLYS:[[[u32;3];4];6]=[
// right (1, 0, 0)
[
[6,2,0],//[vertex,tex,norm]
[5,1,0],
[2,0,0],
[1,3,0],
],
// top (0, 1, 0)
[
[5,3,1],
[4,2,1],
[3,1,1],
[2,0,1],
],
// back (0, 0, 1)
[
[0,3,2],
[1,2,2],
[2,1,2],
[3,0,2],
],
// left (-1, 0, 0)
[
[0,2,3],
[3,1,3],
[4,0,3],
[7,3,3],
],
// bottom (0,-1, 0)
[
[1,1,4],
[0,0,4],
[7,3,4],
[6,2,4],
],
// front (0, 0,-1)
[
[4,1,5],
[5,0,5],
[6,3,5],
[7,2,5],
],
];
pub struct CubeFaceDescription([FaceDescription;Self::FACES]); #[derive(Hash,PartialEq,Eq)]
pub enum WedgeFace{
Right,
TopFront,
Back,
Left,
Bottom,
}
const WEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//Wedge::Right
vec3::int( 0, 1,-1),//Wedge::TopFront
vec3::int( 0, 0, 1),//Wedge::Back
vec3::int(-1, 0, 0),//Wedge::Left
vec3::int( 0,-1, 0),//Wedge::Bottom
];
/*
local cornerWedgeVerticies = {
Vector3.new(-1/2,-1/2,-1/2),7
Vector3.new(-1/2,-1/2, 1/2),0
Vector3.new( 1/2,-1/2,-1/2),6
Vector3.new( 1/2,-1/2, 1/2),1
Vector3.new( 1/2, 1/2,-1/2),5
}
*/
#[derive(Hash,PartialEq,Eq)]
pub enum CornerWedgeFace{
Right,
TopBack,
TopLeft,
Bottom,
Front,
}
const CORNERWEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//CornerWedge::Right
vec3::int( 0, 1, 1),//CornerWedge::BackTop
vec3::int(-1, 1, 0),//CornerWedge::LeftTop
vec3::int( 0,-1, 0),//CornerWedge::Bottom
vec3::int( 0, 0,-1),//CornerWedge::Front
];
pub fn unit_sphere(render:RenderConfigId)->Mesh{
unit_cube(render)
}
#[derive(Default)]
pub struct CubeFaceDescription([Option<FaceDescription>;6]);
impl CubeFaceDescription{ impl CubeFaceDescription{
pub const FACES:usize=6; pub fn insert(&mut self,index:CubeFace,value:FaceDescription){
pub fn new(RobloxPartDescription(part_description):RobloxPartDescription,textureless_render_id:RenderConfigId)->Self{ self.0[index as usize]=Some(value);
Self(part_description.map(|face_description|match face_description{ }
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(), pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,6>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
None=>FaceDescription::new_with_render_id(textureless_render_id), self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}))
} }
} }
pub struct WedgeFaceDescription([FaceDescription;Self::FACES]); pub fn unit_cube(render:RenderConfigId)->Mesh{
let mut t=CubeFaceDescription::default();
t.insert(CubeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Top,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Back,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Left,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Bottom,FaceDescription::new_with_render_id(render));
t.insert(CubeFace::Front,FaceDescription::new_with_render_id(render));
generate_partial_unit_cube(t)
}
pub fn unit_cylinder(render:RenderConfigId)->Mesh{
//lmao
unit_cube(render)
}
#[derive(Default)]
pub struct WedgeFaceDescription([Option<FaceDescription>;5]);
impl WedgeFaceDescription{ impl WedgeFaceDescription{
pub const FACES:usize=5; pub fn insert(&mut self,index:WedgeFace,value:FaceDescription){
pub fn new(RobloxWedgeDescription(part_description):RobloxWedgeDescription,textureless_render_id:RenderConfigId)->Self{ self.0[index as usize]=Some(value);
Self(part_description.map(|face_description|match face_description{ }
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(), pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,5>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
None=>FaceDescription::new_with_render_id(textureless_render_id), self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}))
} }
} }
pub struct CornerWedgeFaceDescription([FaceDescription;Self::FACES]); pub fn unit_wedge(render:RenderConfigId)->Mesh{
let mut t=WedgeFaceDescription::default();
t.insert(WedgeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::TopFront,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Back,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Left,FaceDescription::new_with_render_id(render));
t.insert(WedgeFace::Bottom,FaceDescription::new_with_render_id(render));
generate_partial_unit_wedge(t)
}
#[derive(Default)]
pub struct CornerWedgeFaceDescription([Option<FaceDescription>;5]);
impl CornerWedgeFaceDescription{ impl CornerWedgeFaceDescription{
pub const FACES:usize=5; pub fn insert(&mut self,index:CornerWedgeFace,value:FaceDescription){
pub fn new(RobloxCornerWedgeDescription(part_description):RobloxCornerWedgeDescription,textureless_render_id:RenderConfigId)->Self{ self.0[index as usize]=Some(value);
Self(part_description.map(|face_description|match face_description{
Some(roblox_texture_transform)=>roblox_texture_transform.to_face_description(),
None=>FaceDescription::new_with_render_id(textureless_render_id),
}))
} }
pub fn pairs(self)->std::iter::FilterMap<std::iter::Enumerate<std::array::IntoIter<Option<FaceDescription>,5>>,impl FnMut((usize,Option<FaceDescription>))->Option<(usize,FaceDescription)>>{
self.0.into_iter().enumerate().filter_map(|v|v.1.map(|u|(v.0,u)))
}
}
pub fn unit_cornerwedge(render:RenderConfigId)->Mesh{
let mut t=CornerWedgeFaceDescription::default();
t.insert(CornerWedgeFace::Right,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::TopBack,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::TopLeft,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::Bottom,FaceDescription::new_with_render_id(render));
t.insert(CornerWedgeFace::Front,FaceDescription::new_with_render_id(render));
generate_partial_unit_cornerwedge(t)
} }
#[derive(Clone)] #[derive(Clone)]
@ -113,7 +199,7 @@ pub struct FaceDescription{
pub color:Color4, pub color:Color4,
} }
impl FaceDescription{ impl FaceDescription{
pub fn new_with_render_id(render:RenderConfigId)->Self{ pub fn new_with_render_id(render:RenderConfigId)->Self {
Self{ Self{
render, render,
transform:glam::Affine2::IDENTITY, transform:glam::Affine2::IDENTITY,
@ -121,51 +207,7 @@ impl FaceDescription{
} }
} }
} }
pub fn unit_cube(CubeFaceDescription(face_descriptions):CubeFaceDescription)->Mesh{ pub fn generate_partial_unit_cube(face_descriptions:CubeFaceDescription)->Mesh{
const CUBE_DEFAULT_POLYS:[[[u32;2];4];6]=[
// right (1, 0, 0)
[
[6,2],//[vertex,tex]
[5,1],
[2,0],
[1,3],
],
// top (0, 1, 0)
[
[5,3],
[4,2],
[3,1],
[2,0],
],
// back (0, 0, 1)
[
[0,3],
[1,2],
[2,1],
[3,0],
],
// left (-1, 0, 0)
[
[0,2],
[3,1],
[4,0],
[7,3],
],
// bottom (0,-1, 0)
[
[1,1],
[0,0],
[7,3],
[6,2],
],
// front (0, 0,-1)
[
[4,1],
[5,0],
[6,3],
[7,2],
],
];
let mut generated_pos=Vec::new(); let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new(); let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new(); let mut generated_normal=Vec::new();
@ -176,7 +218,7 @@ pub fn unit_cube(CubeFaceDescription(face_descriptions):CubeFaceDescription)->Me
let mut physics_group=IndexedPhysicsGroup::default(); let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new(); let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices. //note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.into_iter().enumerate(){ for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing. //assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){ let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index transform_index
@ -203,8 +245,8 @@ pub fn unit_cube(CubeFaceDescription(face_descriptions):CubeFaceDescription)->Me
//push vertices as they are needed //push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32); let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![ polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
CUBE_DEFAULT_POLYS[face_id].map(|[pos_id,tex_id]|{ CUBE_DEFAULT_POLYS[face_id].map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[pos_id as usize]; let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){ let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index pos_index
}else{ }else{
@ -216,7 +258,7 @@ pub fn unit_cube(CubeFaceDescription(face_descriptions):CubeFaceDescription)->Me
//always push vertex //always push vertex
let vertex=IndexedVertex{ let vertex=IndexedVertex{
pos:PositionId::new(pos_index), pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tex_id+4*transform_index), tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index), normal:NormalId::new(normal_index),
color:ColorId::new(color_index), color:ColorId::new(color_index),
}; };
@ -243,49 +285,42 @@ pub fn unit_cube(CubeFaceDescription(face_descriptions):CubeFaceDescription)->Me
} }
} }
//don't think too hard about the copy paste because this is all going into the map tool eventually... //don't think too hard about the copy paste because this is all going into the map tool eventually...
pub fn unit_wedge(WedgeFaceDescription(face_descriptions):WedgeFaceDescription)->Mesh{ pub fn generate_partial_unit_wedge(face_descriptions:WedgeFaceDescription)->Mesh{
const WEDGE_DEFAULT_POLYS:[&[[u32;2]];5]=[ let wedge_default_polys=[
// right (1, 0, 0) // right (1, 0, 0)
&[ vec![
[6,2],//[vertex,tex] [6,2,0],//[vertex,tex,norm]
[2,0], [2,0,0],
[1,3], [1,3,0],
], ],
// FrontTop (0, 1, -1) // FrontTop (0, 1, -1)
&[ vec![
[3,1], [3,1,1],
[2,0], [2,0,1],
[6,3], [6,3,1],
[7,2], [7,2,1],
], ],
// back (0, 0, 1) // back (0, 0, 1)
&[ vec![
[0,3], [0,3,2],
[1,2], [1,2,2],
[2,1], [2,1,2],
[3,0], [3,0,2],
], ],
// left (-1, 0, 0) // left (-1, 0, 0)
&[ vec![
[0,2], [0,2,3],
[3,1], [3,1,3],
[7,3], [7,3,3],
], ],
// bottom (0,-1, 0) // bottom (0,-1, 0)
&[ vec![
[1,1], [1,1,4],
[0,0], [0,0,4],
[7,3], [7,3,4],
[6,2], [6,2,4],
], ],
]; ];
const WEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//Wedge::Right
vec3::int( 0, 1,-1),//Wedge::TopFront
vec3::int( 0, 0, 1),//Wedge::Back
vec3::int(-1, 0, 0),//Wedge::Left
vec3::int( 0,-1, 0),//Wedge::Bottom
];
let mut generated_pos=Vec::new(); let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new(); let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new(); let mut generated_normal=Vec::new();
@ -296,7 +331,7 @@ pub fn unit_wedge(WedgeFaceDescription(face_descriptions):WedgeFaceDescription)-
let mut physics_group=IndexedPhysicsGroup::default(); let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new(); let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices. //note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.into_iter().enumerate(){ for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing. //assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){ let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index transform_index
@ -323,8 +358,8 @@ pub fn unit_wedge(WedgeFaceDescription(face_descriptions):WedgeFaceDescription)-
//push vertices as they are needed //push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32); let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![ polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
WEDGE_DEFAULT_POLYS[face_id].iter().map(|&[pos_id,tex_id]|{ wedge_default_polys[face_id].iter().map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[pos_id as usize]; let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){ let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index pos_index
}else{ }else{
@ -336,7 +371,7 @@ pub fn unit_wedge(WedgeFaceDescription(face_descriptions):WedgeFaceDescription)-
//always push vertex //always push vertex
let vertex=IndexedVertex{ let vertex=IndexedVertex{
pos:PositionId::new(pos_index), pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tex_id+4*transform_index), tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index), normal:NormalId::new(normal_index),
color:ColorId::new(color_index), color:ColorId::new(color_index),
}; };
@ -363,47 +398,40 @@ pub fn unit_wedge(WedgeFaceDescription(face_descriptions):WedgeFaceDescription)-
} }
} }
pub fn unit_cornerwedge(CornerWedgeFaceDescription(face_descriptions):CornerWedgeFaceDescription)->Mesh{ pub fn generate_partial_unit_cornerwedge(face_descriptions:CornerWedgeFaceDescription)->Mesh{
const CORNERWEDGE_DEFAULT_POLYS:[&[[u32;2]];5]=[ let cornerwedge_default_polys=[
// right (1, 0, 0) // right (1, 0, 0)
&[ vec![
[6,2],//[vertex,tex] [6,2,0],//[vertex,tex,norm]
[5,1], [5,1,0],
[1,3], [1,3,0],
], ],
// BackTop (0, 1, 1) // BackTop (0, 1, 1)
&[ vec![
[5,3], [5,3,1],
[0,1], [0,1,1],
[1,0], [1,0,1],
], ],
// LeftTop (-1, 1, 0) // LeftTop (-1, 1, 0)
&[ vec![
[5,3], [5,3,2],
[7,2], [7,2,2],
[0,1], [0,1,2],
], ],
// bottom (0,-1, 0) // bottom (0,-1, 0)
&[ vec![
[1,1], [1,1,3],
[0,0], [0,0,3],
[7,3], [7,3,3],
[6,2], [6,2,3],
], ],
// front (0, 0,-1) // front (0, 0,-1)
&[ vec![
[5,0], [5,0,4],
[6,3], [6,3,4],
[7,2], [7,2,4],
], ],
]; ];
const CORNERWEDGE_DEFAULT_NORMALS:[Planar64Vec3;5]=[
vec3::int( 1, 0, 0),//CornerWedge::Right
vec3::int( 0, 1, 1),//CornerWedge::BackTop
vec3::int(-1, 1, 0),//CornerWedge::LeftTop
vec3::int( 0,-1, 0),//CornerWedge::Bottom
vec3::int( 0, 0,-1),//CornerWedge::Front
];
let mut generated_pos=Vec::new(); let mut generated_pos=Vec::new();
let mut generated_tex=Vec::new(); let mut generated_tex=Vec::new();
let mut generated_normal=Vec::new(); let mut generated_normal=Vec::new();
@ -414,7 +442,7 @@ pub fn unit_cornerwedge(CornerWedgeFaceDescription(face_descriptions):CornerWedg
let mut physics_group=IndexedPhysicsGroup::default(); let mut physics_group=IndexedPhysicsGroup::default();
let mut transforms=Vec::new(); let mut transforms=Vec::new();
//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices. //note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
for (face_id,face_description) in face_descriptions.into_iter().enumerate(){ for (face_id,face_description) in face_descriptions.pairs(){
//assume that scanning short lists is faster than hashing. //assume that scanning short lists is faster than hashing.
let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){ let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
transform_index transform_index
@ -441,8 +469,8 @@ pub fn unit_cornerwedge(CornerWedgeFaceDescription(face_descriptions):CornerWedg
//push vertices as they are needed //push vertices as they are needed
let group_id=PolygonGroupId::new(polygon_groups.len() as u32); let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![ polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(vec![
CORNERWEDGE_DEFAULT_POLYS[face_id].iter().map(|&[pos_id,tex_id]|{ cornerwedge_default_polys[face_id].iter().map(|tup|{
let pos=CUBE_DEFAULT_VERTICES[pos_id as usize]; let pos=CUBE_DEFAULT_VERTICES[tup[0] as usize];
let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){ let pos_index=if let Some(pos_index)=generated_pos.iter().position(|&p|p==pos){
pos_index pos_index
}else{ }else{
@ -454,7 +482,7 @@ pub fn unit_cornerwedge(CornerWedgeFaceDescription(face_descriptions):CornerWedg
//always push vertex //always push vertex
let vertex=IndexedVertex{ let vertex=IndexedVertex{
pos:PositionId::new(pos_index), pos:PositionId::new(pos_index),
tex:TextureCoordinateId::new(tex_id+4*transform_index), tex:TextureCoordinateId::new(tup[1]+4*transform_index),
normal:NormalId::new(normal_index), normal:NormalId::new(normal_index),
color:ColorId::new(color_index), color:ColorId::new(color_index),
}; };
@ -480,133 +508,3 @@ pub fn unit_cornerwedge(CornerWedgeFaceDescription(face_descriptions):CornerWedg
physics_groups:vec![physics_group], physics_groups:vec![physics_group],
} }
} }
// TODO: fix face texture orientation
pub fn unit_cylinder(face_descriptions:CubeFaceDescription)->Mesh{
// cylinder is oriented about the x axis
// roblox cylinders use projected grid coordinates
/// how many grid coordinates to use (positive and negative)
const GON:i32=3;
/// grid perimeter
const POINTS:[[i32;2];4*2*GON as usize]=const{
let mut points=[[0;2];{4*2*GON as usize}];
let mut i=-GON;
while i<GON{
points[(i+GON) as usize]=[i,GON];
points[(i+GON+1*2*GON) as usize]=[GON,-i];
points[(i+GON+2*2*GON) as usize]=[-i,-GON];
points[(i+GON+3*2*GON) as usize]=[-GON,i];
i+=1;
}
points
};
let mut mb=MeshBuilder::new();
let mut polygon_groups=Vec::with_capacity(CubeFaceDescription::FACES);
let mut graphics_groups=Vec::with_capacity(CubeFaceDescription::FACES);
let mut physics_group=IndexedPhysicsGroup{groups:Vec::with_capacity(CubeFaceDescription::FACES)};
let CubeFaceDescription([right,top,back,left,bottom,front])=face_descriptions;
macro_rules! end_face{
($face_description:expr,$end:expr,$iter:expr)=>{
let normal=mb.acquire_normal_id($end);
let color=mb.acquire_color_id($face_description.color);
// single polygon for physics
let polygon:Vec<_>=$iter.map(|[x,y]|{
let tex=mb.acquire_tex_id(
$face_description.transform.transform_point2(
(glam::vec2(-x as f32,y as f32).normalize()+1.0)/2.0
)
);
let pos=mb.acquire_pos_id($end+vec3::int(0,-x,y).with_length(Planar64::ONE).divide().wrap_1());
mb.acquire_vertex_id(IndexedVertex{pos,tex,normal,color})
}).collect();
// fanned polygons for graphics
let pos=mb.acquire_pos_id($end);
let tex=mb.acquire_tex_id($face_description.transform.transform_point2(glam::Vec2::ONE/2.0));
let center=mb.acquire_vertex_id(IndexedVertex{pos,tex,normal,color});
let polygon_list=(0..POINTS.len()).map(|i|
vec![center,polygon[i],polygon[(i+1)%POINTS.len()]]
).collect();
// end face graphics
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(polygon_list)));
graphics_groups.push(IndexedGraphicsGroup{
render:$face_description.render,
groups:vec![group_id],
});
// end face physics
let polygon_list=vec![polygon];
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(polygon_list)));
physics_group.groups.push(group_id);
}
}
macro_rules! tex{
($face_description:expr,$tex:expr)=>{{
let [x,y]=$tex;
$face_description.transform.transform_point2(
glam::vec2((x+GON) as f32,(y+GON) as f32)/(2*GON) as f32
)
}};
}
macro_rules! barrel_face{
($face_description:expr,$loop:ident,$lo_dir:expr,$hi_dir:expr,$tex_0:expr,$tex_1:expr,$tex_2:expr,$tex_3:expr)=>{
let mut polygon_list=Vec::with_capacity(CubeFaceDescription::FACES);
for $loop in -GON..GON{
// lo Z
let lz_dir=$lo_dir.with_length(Planar64::ONE).divide().wrap_1();
// hi Z
let hz_dir=$hi_dir.with_length(Planar64::ONE).divide().wrap_1();
// pos
let lx_lz_pos=mb.acquire_pos_id(vec3::NEG_X+lz_dir);
let lx_hz_pos=mb.acquire_pos_id(vec3::NEG_X+hz_dir);
let hx_hz_pos=mb.acquire_pos_id(vec3::X+hz_dir);
let hx_lz_pos=mb.acquire_pos_id(vec3::X+lz_dir);
// tex
let lx_lz_tex=mb.acquire_tex_id(tex!($face_description,$tex_0));
let lx_hz_tex=mb.acquire_tex_id(tex!($face_description,$tex_1));
let hx_hz_tex=mb.acquire_tex_id(tex!($face_description,$tex_2));
let hx_lz_tex=mb.acquire_tex_id(tex!($face_description,$tex_3));
// norm
let lz_norm=mb.acquire_normal_id(lz_dir);
let hz_norm=mb.acquire_normal_id(hz_dir);
// color
let color=mb.acquire_color_id($face_description.color);
polygon_list.push(vec![
mb.acquire_vertex_id(IndexedVertex{pos:lx_lz_pos,tex:lx_lz_tex,normal:lz_norm,color}),
mb.acquire_vertex_id(IndexedVertex{pos:lx_hz_pos,tex:lx_hz_tex,normal:hz_norm,color}),
mb.acquire_vertex_id(IndexedVertex{pos:hx_hz_pos,tex:hx_hz_tex,normal:hz_norm,color}),
mb.acquire_vertex_id(IndexedVertex{pos:hx_lz_pos,tex:hx_lz_tex,normal:lz_norm,color}),
]);
}
// push face
let group_id=PolygonGroupId::new(polygon_groups.len() as u32);
polygon_groups.push(PolygonGroup::PolygonList(PolygonList::new(polygon_list)));
graphics_groups.push(IndexedGraphicsGroup{
render:$face_description.render,
groups:vec![group_id],
});
physics_group.groups.push(group_id);
};
}
end_face!(right, vec3::X,POINTS.into_iter());
barrel_face!(top, z,vec3::int(0,GON,z),vec3::int(0,GON,z+1), [GON,z],[GON,z+1],[-GON,z+1],[-GON,z]);
barrel_face!(back, y,vec3::int(0,y+1,GON),vec3::int(0,y,GON), [GON,y+1],[GON,y],[-GON,y],[-GON,y+1]);
end_face!(left, vec3::NEG_X,POINTS.into_iter().rev());
barrel_face!(bottom, z,vec3::int(0,-GON,z+1),vec3::int(0,-GON,z), [-GON,z+1],[-GON,z],[GON,z],[GON,z+1]);
barrel_face!(front, y,vec3::int(0,y,-GON),vec3::int(0,y+1,-GON), [-GON,y],[-GON,y+1],[GON,y+1],[GON,y]);
let physics_groups=vec![physics_group];
mb.build(polygon_groups,graphics_groups,physics_groups)
}
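
The unit_cylinder hunk above builds its cross-section from a square perimeter of projected grid coordinates, 2*GON steps per side walked edge by edge. A small standalone sketch of that perimeter walk with the same GON=3, plus a couple of sanity checks (illustration only, not the crate's code):

    // Walk the perimeter of a (2*GON) x (2*GON) grid square, producing
    // 4*2*GON points: top edge, right edge, bottom edge, left edge.
    const GON: i32 = 3;
    const POINTS: [[i32; 2]; 4 * 2 * GON as usize] = {
        let mut points = [[0; 2]; 4 * 2 * GON as usize];
        let mut i = -GON;
        while i < GON {
            points[(i + GON) as usize] = [i, GON];
            points[(i + GON + 2 * GON) as usize] = [GON, -i];
            points[(i + GON + 2 * 2 * GON) as usize] = [-i, -GON];
            points[(i + GON + 3 * 2 * GON) as usize] = [-GON, i];
            i += 1;
        }
        points
    };

    fn main() {
        assert_eq!(POINTS.len(), 24);
        assert_eq!(POINTS[0], [-GON, GON]); // start of the top edge
        assert_eq!(POINTS[6], [GON, GON]);  // corner onto the right edge
        // every point lies on the square's perimeter
        assert!(POINTS.iter().all(|&[x, y]| x.abs() == GON || y.abs() == GON));
    }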

File diff suppressed because it is too large

@ -1,178 +0,0 @@
use rbx_mesh::mesh_data::{NormalId2 as MeshDataNormalId2,VertexId as MeshDataVertexId};
use rbx_mesh::physics_data::VertexId as PhysicsDataVertexId;
use strafesnet_common::model::{self,IndexedVertex,PolygonGroup,PolygonGroupId,PolygonList,RenderConfigId};
use strafesnet_common::integer::vec3;
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error{
Block,
MissingVertexId(u32),
Planar64Vec3(strafesnet_common::integer::Planar64TryFromFloatError),
RobloxPhysicsData(rbx_mesh::physics_data::Error),
RobloxMeshData(rbx_mesh::mesh_data::Error),
}
impl std::fmt::Display for Error{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
// wacky state machine to make sure all vertices in a face agree upon what NormalId to use.
// Roblox duplicates this information per vertex when it should only exist per-face.
enum MeshDataNormalStatus{
Agree(MeshDataNormalId2),
Conflicting,
}
struct MeshDataNormalChecker{
status:Option<MeshDataNormalStatus>,
}
impl MeshDataNormalChecker{
fn new()->Self{
Self{status:None}
}
fn check(&mut self,normal:MeshDataNormalId2){
self.status=match self.status.take(){
None=>Some(MeshDataNormalStatus::Agree(normal)),
Some(MeshDataNormalStatus::Agree(old_normal))=>{
if old_normal==normal{
Some(MeshDataNormalStatus::Agree(old_normal))
}else{
Some(MeshDataNormalStatus::Conflicting)
}
},
Some(MeshDataNormalStatus::Conflicting)=>Some(MeshDataNormalStatus::Conflicting),
};
}
fn into_agreed_normal(self)->Option<MeshDataNormalId2>{
self.status.and_then(|status|match status{
MeshDataNormalStatus::Agree(normal)=>Some(normal),
MeshDataNormalStatus::Conflicting=>None,
})
}
}
impl std::error::Error for Error{}
pub fn convert(
roblox_physics_data:&[u8],
roblox_mesh_data:&[u8],
size:glam::Vec3,
crate::rbx::RobloxPartDescription(part_texture_description):crate::rbx::RobloxPartDescription,
)->Result<model::Mesh,Error>{
const NORMAL_FACES:usize=6;
let mut polygon_groups_normal_id=vec![Vec::new();NORMAL_FACES];
// build graphics and physics meshes
let mut mb=strafesnet_common::model::MeshBuilder::new();
// graphics
let graphics_groups=if !roblox_mesh_data.is_empty(){
// create per-face texture coordinate affine transforms
let cube_face_description=part_texture_description.map(|opt|opt.map(|mut t|{
t.transform.set_size(1.0,1.0);
t.to_face_description()
}));
let mesh_data=rbx_mesh::read_mesh_data_versioned(
std::io::Cursor::new(roblox_mesh_data)
).map_err(Error::RobloxMeshData)?;
let graphics_mesh=match mesh_data{
rbx_mesh::mesh_data::MeshData::CSGK(_)=>return Err(Error::Block),
rbx_mesh::mesh_data::MeshData::CSGMDL(rbx_mesh::mesh_data::CSGMDL::CSGMDL2(mesh_data2))=>mesh_data2.mesh,
rbx_mesh::mesh_data::MeshData::CSGMDL(rbx_mesh::mesh_data::CSGMDL::CSGMDL4(mesh_data4))=>mesh_data4.mesh,
};
for [MeshDataVertexId(vertex_id0),MeshDataVertexId(vertex_id1),MeshDataVertexId(vertex_id2)] in graphics_mesh.faces{
let face=[
graphics_mesh.vertices.get(vertex_id0 as usize).ok_or(Error::MissingVertexId(vertex_id0))?,
graphics_mesh.vertices.get(vertex_id1 as usize).ok_or(Error::MissingVertexId(vertex_id1))?,
graphics_mesh.vertices.get(vertex_id2 as usize).ok_or(Error::MissingVertexId(vertex_id2))?,
];
let mut normal_agreement_checker=MeshDataNormalChecker::new();
let face=face.into_iter().map(|vertex|{
normal_agreement_checker.check(vertex.normal_id);
let pos=mb.acquire_pos_id(vec3::try_from_f32_array(vertex.pos)?);
let normal=mb.acquire_normal_id(vec3::try_from_f32_array(vertex.norm)?);
let tex_coord=glam::Vec2::from_array(vertex.tex);
let maybe_face_description=&cube_face_description[vertex.normal_id as usize-1];
let (tex,color)=match maybe_face_description{
Some(face_description)=>{
// transform texture coordinates and set decal color
let tex=mb.acquire_tex_id(face_description.transform.transform_point2(tex_coord));
let color=mb.acquire_color_id(face_description.color);
(tex,color)
},
None=>{
// texture coordinates don't matter and pass through mesh vertex color
let tex=mb.acquire_tex_id(tex_coord);
let color=mb.acquire_color_id(glam::Vec4::from_array(vertex.color.map(|f|f as f32/255.0f32)));
(tex,color)
},
};
Ok(mb.acquire_vertex_id(IndexedVertex{pos,tex,normal,color}))
}).collect::<Result<Vec<_>,_>>().map_err(Error::Planar64Vec3)?;
if let Some(normal_id)=normal_agreement_checker.into_agreed_normal(){
polygon_groups_normal_id[normal_id as usize-1].push(face);
}else{
panic!("Empty face!");
}
}
(0..NORMAL_FACES).map(|polygon_group_id|{
model::IndexedGraphicsGroup{
render:cube_face_description[polygon_group_id].as_ref().map_or(RenderConfigId::new(0),|face_description|face_description.render),
groups:vec![PolygonGroupId::new(polygon_group_id as u32)]
}
}).collect()
}else{
Vec::new()
};
//physics
let physics_convex_meshes=if !roblox_physics_data.is_empty(){
let physics_data=rbx_mesh::read_physics_data_versioned(
std::io::Cursor::new(roblox_physics_data)
).map_err(Error::RobloxPhysicsData)?;
let physics_convex_meshes=match physics_data{
rbx_mesh::physics_data::PhysicsData::CSGK(_)
// have not seen this format in practice
|rbx_mesh::physics_data::PhysicsData::CSGPHS(rbx_mesh::physics_data::CSGPHS::Block)
=>return Err(Error::Block),
rbx_mesh::physics_data::PhysicsData::CSGPHS(rbx_mesh::physics_data::CSGPHS::Meshes(meshes))
=>meshes.meshes,
rbx_mesh::physics_data::PhysicsData::CSGPHS(rbx_mesh::physics_data::CSGPHS::PhysicsInfoMesh(pim))
=>vec![pim.mesh],
};
physics_convex_meshes
}else{
Vec::new()
};
let polygon_groups:Vec<PolygonGroup>=polygon_groups_normal_id.into_iter().map(|faces|
// graphics polygon groups (to be rendered)
Ok(PolygonGroup::PolygonList(PolygonList::new(faces)))
).chain(physics_convex_meshes.into_iter().map(|mesh|{
// this can be factored out of the loop but I am lazy
let color=mb.acquire_color_id(glam::Vec4::ONE);
let tex=mb.acquire_tex_id(glam::Vec2::ZERO);
// physics polygon groups (to do physics)
Ok(PolygonGroup::PolygonList(PolygonList::new(mesh.faces.into_iter().map(|[PhysicsDataVertexId(vertex_id0),PhysicsDataVertexId(vertex_id1),PhysicsDataVertexId(vertex_id2)]|{
let face=[
mesh.vertices.get(vertex_id0 as usize).ok_or(Error::MissingVertexId(vertex_id0))?,
mesh.vertices.get(vertex_id1 as usize).ok_or(Error::MissingVertexId(vertex_id1))?,
mesh.vertices.get(vertex_id2 as usize).ok_or(Error::MissingVertexId(vertex_id2))?,
].map(|v|glam::Vec3::from_slice(v)/size);
let vertex_norm=(face[1]-face[0])
.cross(face[2]-face[0]);
let normal=mb.acquire_normal_id(vec3::try_from_f32_array(vertex_norm.to_array()).map_err(Error::Planar64Vec3)?);
face.into_iter().map(|vertex_pos|{
let pos=mb.acquire_pos_id(vec3::try_from_f32_array(vertex_pos.to_array()).map_err(Error::Planar64Vec3)?);
Ok(mb.acquire_vertex_id(IndexedVertex{pos,tex,normal,color}))
}).collect()
}).collect::<Result<_,_>>()?)))
})).collect::<Result<_,_>>()?;
let physics_groups=(NORMAL_FACES..polygon_groups.len()).map(|id|model::IndexedPhysicsGroup{
groups:vec![PolygonGroupId::new(id as u32)]
}).collect();
Ok(mb.build(
polygon_groups,
graphics_groups,
physics_groups,
))
}
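
MeshDataNormalChecker in the union.rs hunk above is a small fold that collapses per-vertex normal ids into one per-face value, or reports a conflict. The same shape works for any Eq type; a generic standalone sketch:

    // Fold an iterator into Some(value) if every item agrees, or None if any
    // two items differ (or the iterator is empty), mirroring the
    // Agree/Conflicting state machine.
    fn agreed<T: Eq>(values: impl IntoIterator<Item = T>) -> Option<T> {
        let mut acc: Option<T> = None;
        for value in values {
            match acc {
                None => acc = Some(value),
                Some(ref old) if *old == value => {}
                Some(_) => return None, // conflict: no single agreed value
            }
        }
        acc
    }

    fn main() {
        assert_eq!(agreed([2u32, 2, 2]), Some(2));
        assert_eq!(agreed([2u32, 3, 2]), None);
        assert_eq!(agreed(Vec::<u32>::new()), None);
    }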

@ -1,11 +0,0 @@
[package]
name = "rbxassetid"
version = "0.1.0"
edition = "2024"
repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0"
description = "Parse Roblox asset id from 'Content' urls."
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
[dependencies]
url = "2.5.4"
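
The rbxassetid crate above ("Parse Roblox asset id from 'Content' urls") is what the loader hunks use as let RobloxAssetId(asset_id)=index.parse()?. A rough standalone sketch of that kind of parse, assuming rbxassetid://<digits> and ...?id=<digits> content forms; this is not the crate's actual implementation, which depends on the url crate and will differ:

    // Extract a numeric Roblox asset id from a content URL string.
    // Handles "rbxassetid://123" and "...?id=123"-style URLs; anything else is None.
    fn parse_asset_id(content: &str) -> Option<u64> {
        if let Some(rest) = content.strip_prefix("rbxassetid://") {
            return rest.parse().ok();
        }
        // fall back to an `id=` query parameter
        let (_, query) = content.split_once('?')?;
        query
            .split('&')
            .find_map(|pair| pair.strip_prefix("id="))
            .and_then(|digits| digits.parse().ok())
    }

    fn main() {
        assert_eq!(parse_asset_id("rbxassetid://255299419"), Some(255299419));
        assert_eq!(
            parse_asset_id("https://www.roblox.com/asset/?id=255299419"),
            Some(255299419)
        );
        assert_eq!(parse_asset_id("not a content url"), None);
    }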

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

@ -1,26 +0,0 @@
Roblox Asset Id
===============
## Example
```rust
use rbxassetid::RobloxAssetId;
let content="rbxassetid://255299419";
let RobloxAssetId(asset_id)=content.parse()?;
```
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>

@ -1,41 +0,0 @@
#[derive(Clone,Copy,Debug,Hash,Eq,PartialEq)]
pub struct RobloxAssetId(pub u64);
#[derive(Debug)]
pub enum RobloxAssetIdParseErr{
Url(url::ParseError),
UnknownScheme,
ParseInt(std::num::ParseIntError),
MissingAssetId,
MissingIDQueryParam,
}
impl std::fmt::Display for RobloxAssetIdParseErr{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for RobloxAssetIdParseErr{}
impl std::str::FromStr for RobloxAssetId{
type Err=RobloxAssetIdParseErr;
fn from_str(s:&str)->Result<Self,Self::Err>{
let url=url::Url::parse(s).map_err(RobloxAssetIdParseErr::Url)?;
let parsed_asset_id=match url.scheme(){
"rbxassetid"=>url.domain().ok_or(RobloxAssetIdParseErr::MissingAssetId)?.parse(),
"http"|"https"=>{
let (_,asset_id)=url.query_pairs()
.find(|(id,_)|match id.as_ref(){
"ID"|"id"|"Id"|"iD"=>true,
_=>false,
}).ok_or(RobloxAssetIdParseErr::MissingIDQueryParam)?;
asset_id.parse()
},
_=>Err(RobloxAssetIdParseErr::UnknownScheme)?,
};
Ok(Self(parsed_asset_id.map_err(RobloxAssetIdParseErr::ParseInt)?))
}
}
#[test]
fn test_rbxassetid(){
let content="rbxassetid://255299419";
let RobloxAssetId(_asset_id)=content.parse().unwrap();
}
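The `http`/`https` branch above accepts any of the `ID`/`id`/`Id`/`iD` query parameters. A small usage sketch of that path (the URL below is a made-up example, not one taken from the crate):

```rust
use rbxassetid::RobloxAssetId;

fn main() {
    // hypothetical asset URL; only the scheme and the `id` query parameter matter here
    let content = "https://example.com/asset/?id=255299419";
    let RobloxAssetId(asset_id) = content.parse::<RobloxAssetId>().unwrap();
    assert_eq!(asset_id, 255299419);
}
```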

@ -1,7 +1,7 @@
[package] [package]
name = "roblox_emulator" name = "roblox_emulator"
version = "0.4.7" version = "0.4.7"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
description = "Run embedded Luau scripts which manipulate the DOM." description = "Run embedded Luau scripts which manipulate the DOM."
@ -12,7 +12,7 @@ default=["run-service"]
run-service=[] run-service=[]
[dependencies] [dependencies]
glam = "0.30.0" glam = "0.29.0"
mlua = { version = "0.10.1", features = ["luau"] } mlua = { version = "0.10.1", features = ["luau"] }
phf = { version = "0.11.2", features = ["macros"] } phf = { version = "0.11.2", features = ["macros"] }
rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" } rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" }

@ -1,22 +1,22 @@
macro_rules! type_from_lua_userdata{ macro_rules! type_from_lua_userdata{
($ty:ident)=>{ ($asd:ident)=>{
impl mlua::FromLua for $ty{ impl mlua::FromLua for $asd{
fn from_lua(value:mlua::Value,_lua:&mlua::Lua)->Result<Self,mlua::Error>{ fn from_lua(value:mlua::Value,_lua:&mlua::Lua)->Result<Self,mlua::Error>{
match value{ match value{
mlua::Value::UserData(ud)=>Ok(*ud.borrow::<Self>()?), mlua::Value::UserData(ud)=>Ok(*ud.borrow::<Self>()?),
other=>Err(mlua::Error::runtime(format!("Expected {} got {:?}",stringify!($ty),other))), other=>Err(mlua::Error::runtime(format!("Expected {} got {:?}",stringify!($asd),other))),
} }
} }
} }
}; };
} }
macro_rules! type_from_lua_userdata_lua_lifetime{ macro_rules! type_from_lua_userdata_lua_lifetime{
($ty:ident)=>{ ($asd:ident)=>{
impl mlua::FromLua for $ty<'static>{ impl mlua::FromLua for $asd<'static>{
fn from_lua(value:mlua::Value,_lua:&mlua::Lua)->Result<Self,mlua::Error>{ fn from_lua(value:mlua::Value,_lua:&mlua::Lua)->Result<Self,mlua::Error>{
match value{ match value{
mlua::Value::UserData(ud)=>Ok(*ud.borrow::<Self>()?), mlua::Value::UserData(ud)=>Ok(*ud.borrow::<Self>()?),
other=>Err(mlua::Error::runtime(format!("Expected {} got {:?}",stringify!($ty),other))), other=>Err(mlua::Error::runtime(format!("Expected {} got {:?}",stringify!($asd),other))),
} }
} }
} }

@ -59,8 +59,8 @@ fn schedule_thread(lua:&mlua::Lua,dt:mlua::Value)->Result<(),mlua::Error>{
match delay.classify(){ match delay.classify(){
std::num::FpCategory::Nan=>Err(mlua::Error::runtime("NaN"))?, std::num::FpCategory::Nan=>Err(mlua::Error::runtime("NaN"))?,
// cases where the number is too large to schedule // cases where the number is too large to schedule
std::num::FpCategory::Infinite std::num::FpCategory::Infinite=>return Ok(()),
|std::num::FpCategory::Normal if (u64::MAX as f64)<delay=>{ std::num::FpCategory::Normal=>if (u64::MAX as f64)<delay{
return Ok(()); return Ok(());
}, },
_=>(), _=>(),

@ -1,11 +1,11 @@
[package] [package]
name = "strafesnet_snf" name = "strafesnet_snf"
version = "0.3.0" version = "0.2.0"
edition = "2024" edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
binrw = "0.14.0" binrw = "0.14.0"
id = { version = "0.1.0", registry = "strafesnet" } id = { version = "0.1.0", registry = "strafesnet" }
strafesnet_common = { version = "0.6.0", path = "../common", registry = "strafesnet" } strafesnet_common = { path = "../common", registry = "strafesnet" }

@ -1,347 +1,98 @@
use binrw::{binrw,BinReaderExt,BinWrite,BinWriterExt}; use binrw::{BinReaderExt, binrw};
use crate::newtypes;
use crate::file::BlockId;
use strafesnet_common::physics::Time;
const VERSION:u32=0;
type TimedPhysicsInstruction=strafesnet_common::instruction::TimedInstruction<strafesnet_common::physics::Instruction,strafesnet_common::physics::Time>;
#[derive(Debug)] #[derive(Debug)]
pub enum Error{ pub enum Error{
InvalidHeader(binrw::Error), InvalidHeader,
InvalidSegment(binrw::Error), InvalidSegment(binrw::Error),
SegmentConvert(newtypes::integer::RatioError),
InstructionConvert(newtypes::physics::InstructionConvert),
InstructionWrite(binrw::Error),
InvalidSegmentId(SegmentId), InvalidSegmentId(SegmentId),
InvalidData(binrw::Error),
IO(std::io::Error),
File(crate::file::Error), File(crate::file::Error),
} }
// Bot files are simply the sequence of instructions that the physics received during the run.
// The instructions are partitioned into timestamped blocks for ease of streaming.
//
// Keyframe information required for efficient seeking
// is part of a different file, and is generated from this file.
/* block types /* block types
BLOCK_BOT_HEADER: BLOCK_BOT_HEADER:
// Segments are laid out in chronological order, u128 map_resource_uuid //which map is this bot running
// but block_id is not necessarily in ascending order. //don't include style info in bot header because it's in the simulation state
// //blocks are laid out in chronological order, but indices may jump around.
// This is to place the final segment close to the start of the file, u64 num_segments
// which allows the duration of the bot to be conveniently calculated
// from the first and last instruction timestamps.
//
// Use exact physics version for replay playback
// Use highest compatible physics version for verification
u32 physics_version
u32 num_segments
for _ in 0..num_segments{ for _ in 0..num_segments{
i64 time i64 time //simulation_state timestamp
u32 instruction_count u64 block_id
u32 block_id
} }
BLOCK_BOT_SEGMENT: BLOCK_BOT_SEGMENT:
// segments can potentially be losslessly compressed! //format version indicates what version of these structures to use
for _ in 0..instruction_count{ SimulationState simulation_state //SimulationState is just under ClientState which includes Play/Pause events that the simulation doesn't know about.
// TODO: delta encode as much as possible (time,mousepos) //to read, greedily decode instructions until eof
i64 time loop{
physics::Instruction instruction //delta encode as much as possible (time,mousepos)
//strafe ticks are implied
//physics can be implied in an input-only bot file
TimedInstruction<SimulationInstruction> instruction
} }
*/ */
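As the layout comment notes, placing the final segment right after the first one means a run's duration can be read from just the bot header and those two nearby blocks. A rough sketch of that shortcut, assuming the `Header` and `Segment` types defined below (`run_time_range` is a hypothetical helper, not part of the file):

```rust
// start = time of the first instruction, from the header's first (chronological) entry;
// end   = time of the last instruction, from the chronologically-last segment,
//         whose block is stored near the start of the file.
fn run_time_range(header: &Header, last_segment: &Segment) -> (Time, Time) {
    let start = Time::raw(header.segments.first().map_or(0, |seg| seg.time));
    let end = last_segment.instructions.last().map_or(start, |ins| ins.time);
    (start, end)
}
```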
#[binrw] //error hiding mock code
#[brw(little)] mod simulation{
struct SegmentHeader{ #[super::binrw]
time:i64, #[brw(little)]
instruction_count:u32, pub struct State{}
block_id:BlockId, #[super::binrw]
} #[brw(little)]
#[binrw] pub struct Instruction{}
#[brw(little)]
struct Header{
physics_version:u32,
num_segments:u32,
#[br(count=num_segments)]
segments:Vec<SegmentHeader>,
} }
// mod instruction{
// #[super::binrw]
// #[brw(little)]
// pub struct TimedInstruction<Instruction:binrw::BinRead+binrw::BinWrite>{
// time:u64,
// instruction:Instruction
// }
// }
// mod timeline{
// #[super::binrw]
// #[brw(little)]
// pub struct Timeline<Instruction:binrw::BinRead+binrw::BinWrite>{
// #[bw(try_calc(u32::try_from(instructions.len())))]
// instruction_count:u32,
// #[br(count=instruction_count)]
// instructions:Vec<super::instruction::TimedInstruction<Instruction>>
// }
// }
//serious code
#[binrw] #[binrw]
#[brw(little)] #[brw(little)]
#[derive(Clone,Copy,Debug,id::Id)] #[derive(Clone,Copy,Debug,id::Id)]
pub struct SegmentId(u32); pub struct SegmentId(u32);
#[binrw]
#[brw(little)]
pub struct Segment{ pub struct Segment{
pub instructions:Vec<TimedPhysicsInstruction> state:simulation::State,
} //#[bw(try_calc(u32::try_from(instructions.len())))]
//instruction_count:u32,
//#[br(count=instruction_count)]
//instructions:Vec<instruction::TimedInstruction<simulation::Instruction>>
#[derive(Clone,Copy,Debug)] //please remember that strafe ticks are implicit! 33% smaller bot files
pub struct SegmentInfo{
/// time of the first instruction in this segment.
time:Time,
instruction_count:u32,
/// The total number of instructions in segments up to and including this segment.
/// Alternatively, the id of the first instruction in the _next_ segment.
instructions_subtotal:u64,
block_id:BlockId,
} }
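The cumulative `instructions_subtotal` is what lets `find_segments_instruction_range` below use `partition_point` instead of a linear scan. A simplified standalone sketch of the idea, with made-up counts (the real method uses a strict `<` so the boundary segment is included in the returned range):

```rust
// subtotals[i] = total instructions in segments 0..=i,
// i.e. the id of the first instruction of segment i+1
fn segment_containing(instruction_id: u64, subtotals: &[u64]) -> usize {
    // index of the first segment whose subtotal exceeds the instruction id
    subtotals.partition_point(|&subtotal| subtotal <= instruction_id)
}

fn main() {
    let subtotals = [10, 25, 40]; // segments holding 10, 15 and 15 instructions
    assert_eq!(segment_containing(9, &subtotals), 0);
    assert_eq!(segment_containing(10, &subtotals), 1);
    assert_eq!(segment_containing(30, &subtotals), 2);
}
```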
pub struct StreamableBot<R:BinReaderExt>{ pub struct StreamableBot<R:BinReaderExt>{
file:crate::file::File<R>, file:crate::file::File<R>,
segment_map:Vec<SegmentInfo>, //timeline:timeline::Timeline<SegmentId>,
segment_id_to_block_id:Vec<crate::file::BlockId>,
} }
impl<R:BinReaderExt> StreamableBot<R>{ impl<R:BinReaderExt> StreamableBot<R>{
pub(crate) fn new(mut file:crate::file::File<R>)->Result<Self,Error>{ pub(crate) fn new(file:crate::file::File<R>)->Result<Self,Error>{
//assume the file seek is in the right place to start reading header Err(Error::InvalidHeader)
let header:Header=file.data_mut().read_le().map_err(Error::InvalidHeader)?;
let mut instructions_subtotal=0;
let segment_map=header.segments.into_iter().map(|SegmentHeader{time,instruction_count,block_id}|{
instructions_subtotal+=instruction_count as u64;
SegmentInfo{
time:Time::raw(time),
instruction_count,
instructions_subtotal,
block_id,
}
}).collect();
Ok(Self{
file,
segment_map,
})
}
fn get_segment_info(&self,segment_id:SegmentId)->Result<SegmentInfo,Error>{
Ok(*self.segment_map.get(segment_id.get() as usize).ok_or(Error::InvalidSegmentId(segment_id))?)
}
pub fn find_segments_instruction_range(&self,start_instruction:u64,end_instruction:u64)->&[SegmentInfo]{
let start=self.segment_map.partition_point(|segment_info|segment_info.instructions_subtotal<start_instruction);
let end=self.segment_map.partition_point(|segment_info|segment_info.instructions_subtotal<end_instruction);
&self.segment_map[start..=end]
}
// pub fn find_segments_time_range(&self,start_time:Time,end_time:Time)->&[SegmentInfo]{
// // TODO: This is off by one, both should be one less
// let start=self.segment_map.partition_point(|segment_info|segment_info.time<start_time);
// let end=self.segment_map.partition_point(|segment_info|segment_info.time<end_time);
// &self.segment_map[start..=end]
// }
fn append_to_segment(&mut self,segment_info:SegmentInfo,segment:&mut Segment)->Result<(),Error>{
let mut block=self.file.block_reader(segment_info.block_id).map_err(Error::File)?;
for _ in 0..segment_info.instruction_count{
let instruction:newtypes::physics::TimedInstruction=block.read_le().map_err(Error::InvalidSegment)?;
segment.instructions.push(instruction.try_into().map_err(Error::SegmentConvert)?);
}
Ok(())
}
pub fn load_segment(&mut self,segment_info:SegmentInfo)->Result<Segment,Error>{
let mut segment=Segment{
instructions:Vec::with_capacity(segment_info.instruction_count as usize),
};
self.append_to_segment(segment_info,&mut segment)?;
Ok(segment)
}
pub fn read_all(&mut self)->Result<Segment,Error>{
let mut segment=Segment{
instructions:Vec::new(),
};
for i in 0..self.segment_map.len(){
let segment_info=self.segment_map[i];
self.append_to_segment(segment_info,&mut segment)?;
} }
pub fn load_segment(&mut self,segment_id:SegmentId)->Result<Segment,Error>{
let block_id=*self.segment_id_to_block_id.get(segment_id.get() as usize).ok_or(Error::InvalidSegmentId(segment_id))?;
let mut block=self.file.block_reader(block_id).map_err(Error::File)?;
let segment=block.read_le().map_err(Error::InvalidSegment)?;
Ok(segment) Ok(segment)
} }
} }
const MAX_BLOCK_SIZE:usize=64*1024;//64 kB
pub fn write_bot<W:BinWriterExt>(mut writer:W,physics_version:u32,instructions:impl IntoIterator<Item=TimedPhysicsInstruction>)->Result<(),Error>{
// decide which instructions to put in which segment
// write segment 1 to block 1
// write segment N to block 2
// write rest of segments
// 1 2 3 4 5
// becomes
// [1 5] 2 3 4
struct SegmentHeaderInfo{
time:Time,
instruction_count:u32,
range:core::ops::Range<usize>
}
let mut segment_header_infos=Vec::new();
let mut raw_segments=std::io::Cursor::new(Vec::new());
// block info
let mut start_time=Time::ZERO;
let mut start_position=raw_segments.position() as usize;
let mut instruction_count=0;
let mut last_position=start_position;
let mut iter=instructions.into_iter();
macro_rules! collect_instruction{
($instruction:expr)=>{
let time=$instruction.time;
let instruction_writable:newtypes::physics::TimedInstruction=$instruction.try_into().map_err(Error::InstructionConvert)?;
instruction_writable.write_le(&mut raw_segments).map_err(Error::InstructionWrite)?;
instruction_count+=1;
let position=raw_segments.position() as usize;
// exceeds max block size
if MAX_BLOCK_SIZE<position-last_position{
segment_header_infos.push(SegmentHeaderInfo{
time:start_time,
instruction_count,
range:start_position..last_position,
});
start_position=last_position;
instruction_count=0;
start_time=time;
}
last_position=position;
}
}
// unroll one loop iteration to grab the starting time
if let Some(instruction)=iter.next(){
start_time=instruction.time;
collect_instruction!(instruction);
}
for instruction in iter{
collect_instruction!(instruction);
}
//last block, whatever size it happens to be
{
let final_position=raw_segments.position() as usize;
segment_header_infos.push(SegmentHeaderInfo{
time:start_time,
instruction_count,
range:start_position..final_position,
});
}
// drop cursor
let raw_segments=raw_segments.into_inner();
let num_segments=segment_header_infos.len();
// segments list is in chronological order
let make_segment_header=|block_id,&SegmentHeaderInfo{time,instruction_count,range:ref _range}|SegmentHeader{
time:time.get(),
instruction_count,
block_id,
};
let segments=if 2<num_segments{
let mut segments=Vec::with_capacity(num_segments);
// segment 1 is second block
if let Some(seg)=segment_header_infos.first(){
segments.push(make_segment_header(BlockId::new(1),seg));
}
// rest of segments start at fourth block
for (i,seg) in segment_header_infos[1..num_segments-1].iter().enumerate(){
segments.push(make_segment_header(BlockId::new(3+i as u32),seg));
}
// last segment is third block
if let Some(seg)=segment_header_infos.last(){
segments.push(make_segment_header(BlockId::new(2),seg));
}
segments
}else{
// all segments in order
segment_header_infos.iter().enumerate().map(|(i,seg)|
make_segment_header(BlockId::new(1+i as u32),seg)
).collect()
};
let header=Header{
physics_version,
num_segments:num_segments as u32,
segments,
};
// +1 for the bot header block
let block_count=1+num_segments as u32;
let mut offset=crate::file::Header::calculate_size(block_count) as u64;
// block_location is one longer than block_count
let mut block_location=Vec::with_capacity(1+block_count as usize);
//probe header length
let mut bot_header_data=Vec::new();
binrw::BinWrite::write_le(&header,&mut std::io::Cursor::new(&mut bot_header_data)).map_err(Error::InvalidData)?;
// the first block location is the bot header
block_location.push(offset);
offset+=bot_header_data.len() as u64;
block_location.push(offset);
// priming includes file header + first 3 blocks [bot header, first segment, last segment]
let priming=if 2<num_segments{
// segment 1 is block 2
if let Some(seg)=segment_header_infos.first(){
offset+=seg.range.len() as u64;
block_location.push(offset);
}
// last segment is block 3
if let Some(seg)=segment_header_infos.last(){
offset+=seg.range.len() as u64;
block_location.push(offset);
}
let priming=offset;
// rest of segments
for seg in &segment_header_infos[1..num_segments-1]{
offset+=seg.range.len() as u64;
block_location.push(offset);
}
priming
}else{
// all segments in order
for seg in &segment_header_infos{
offset+=seg.range.len() as u64;
block_location.push(offset);
}
offset
};
let file_header=crate::file::Header{
fourcc:crate::file::FourCC::Bot,
version:VERSION,
priming,
resource:0,
block_count,
block_location,
};
// write file header
writer.write_le(&file_header).map_err(Error::InvalidData)?;
// write bot header
writer.write(&bot_header_data).map_err(Error::IO)?;
// write blocks
if 2<num_segments{
// segment 1 is block 2
if let Some(seg)=segment_header_infos.first(){
writer.write(&raw_segments[seg.range.clone()]).map_err(Error::IO)?;
}
// last segment is block 3
if let Some(seg)=segment_header_infos.last(){
writer.write(&raw_segments[seg.range.clone()]).map_err(Error::IO)?;
}
// rest of segments
for seg in &segment_header_infos[1..num_segments-1]{
writer.write(&raw_segments[seg.range.clone()]).map_err(Error::IO)?;
}
}else{
// all segments in order
for seg in segment_header_infos{
writer.write(&raw_segments[seg.range]).map_err(Error::IO)?;
}
}
Ok(())
}
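The `1 2 3 4 5` becomes `[1 5] 2 3 4` comment above describes how chronological segments are assigned to blocks: the bot header comes first, the first segment goes to block 1, the last segment to block 2 (so both sit near the start of the file), and the middle segments follow in order. A small sketch of that mapping (`block_id_for_segment` is a hypothetical helper mirroring the branches in `write_bot`):

```rust
// Block id for the segment at chronological `index` out of `num_segments` total.
fn block_id_for_segment(index: usize, num_segments: usize) -> u32 {
    if num_segments <= 2 {
        1 + index as u32 // few segments: written in order
    } else if index == 0 {
        1 // first segment: second block
    } else if index == num_segments - 1 {
        2 // last segment: third block
    } else {
        2 + index as u32 // middle segments: fourth block onwards
    }
}

fn main() {
    // five segments land in blocks [1, 3, 4, 5, 2];
    // reading blocks 1..=5 in file order yields segments 1,5,2,3,4.
    let blocks: Vec<u32> = (0..5).map(|i| block_id_for_segment(i, 5)).collect();
    assert_eq!(blocks, vec![1, 3, 4, 5, 2]);
}
```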

@ -73,16 +73,6 @@ pub struct Header{
#[br(count=block_count+1)] #[br(count=block_count+1)]
pub block_location:Vec<u64>, pub block_location:Vec<u64>,
} }
impl Header{
pub const fn calculate_size(block_count:u32)->usize{
4 // fourcc
+4 // version
+8 // priming
+16 // resource
+4 // block_count
+(block_count as usize+1)*8 // block_location
}
}
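The `+1` matches the `#[br(count=block_count+1)]` on `block_location` above: there is one more location entry than there are blocks, so the end of the last block is recorded as well. For example, a file with 2 blocks has a `calculate_size` of 4+4+8+16+4 + 3*8 = 60 bytes.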
#[binrw] #[binrw]
#[brw(little)] #[brw(little)]

@ -6,7 +6,7 @@ use crate::file::BlockId;
use binrw::{binrw,BinReaderExt,BinWriterExt}; use binrw::{binrw,BinReaderExt,BinWriterExt};
use strafesnet_common::model; use strafesnet_common::model;
use strafesnet_common::aabb::Aabb; use strafesnet_common::aabb::Aabb;
use strafesnet_common::bvh::{BvhNode,RecursiveContent}; use strafesnet_common::bvh::BvhNode;
use strafesnet_common::gameplay_modes; use strafesnet_common::gameplay_modes;
#[derive(Debug)] #[derive(Debug)]
@ -86,7 +86,6 @@ for model_id in 0..num_models{
//if you hash the resource itself and set the first 8 bits to this, that's the resource uuid //if you hash the resource itself and set the first 8 bits to this, that's the resource uuid
#[binrw] #[binrw]
#[brw(little,repr=u8)] #[brw(little,repr=u8)]
#[repr(u8)]
enum ResourceType{ enum ResourceType{
Mesh, Mesh,
Texture, Texture,
@ -95,6 +94,21 @@ enum ResourceType{
//Video, //Video,
//Animation, //Animation,
} }
const RESOURCE_TYPE_VARIANT_COUNT:u8=2;
#[binrw]
#[brw(little)]
struct ResourceId(u128);
impl ResourceId{
fn resource_type(&self)->Option<ResourceType>{
let discriminant=self.0 as u8;
//TODO: use this when it is stabilized https://github.com/rust-lang/rust/issues/73662
//if (discriminant as usize)<std::mem::variant_count::<ResourceType>(){
match discriminant<RESOURCE_TYPE_VARIANT_COUNT{
true=>Some(unsafe{std::mem::transmute::<u8,ResourceType>(discriminant)}),
false=>None,
}
}
}
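The transmute above is sound only while `RESOURCE_TYPE_VARIANT_COUNT` stays in step with the enum. A safe equivalent under the same assumption (Mesh = 0, Texture = 1), at the cost of listing the variants twice:

```rust
// Safe counterpart to ResourceId::resource_type for the two-variant enum above.
fn resource_type_checked(discriminant: u8) -> Option<ResourceType> {
    match discriminant {
        0 => Some(ResourceType::Mesh),
        1 => Some(ResourceType::Texture),
        _ => None,
    }
}
```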
struct ResourceMap<T>{ struct ResourceMap<T>{
meshes:HashMap<strafesnet_common::model::MeshId,T>, meshes:HashMap<strafesnet_common::model::MeshId,T>,
@ -121,6 +135,11 @@ struct ResourceBlockHeader{
resource:ResourceType, resource:ResourceType,
id:BlockId, id:BlockId,
} }
#[binrw]
#[brw(little)]
struct ResourceExternalHeader{
resource_uuid:ResourceId,
}
#[binrw] #[binrw]
#[brw(little)] #[brw(little)]
@ -138,7 +157,7 @@ struct MapHeader{
//#[br(count=num_resources_external)] //#[br(count=num_resources_external)]
//external_resources:Vec<ResourceExternalHeader>, //external_resources:Vec<ResourceExternalHeader>,
#[br(count=num_modes)] #[br(count=num_modes)]
modes:Vec<newtypes::gameplay_modes::NormalizedMode>, modes:Vec<newtypes::gameplay_modes::Mode>,
#[br(count=num_attributes)] #[br(count=num_attributes)]
attributes:Vec<newtypes::gameplay_attributes::CollisionAttributes>, attributes:Vec<newtypes::gameplay_attributes::CollisionAttributes>,
#[br(count=num_render_configs)] #[br(count=num_render_configs)]
@ -181,7 +200,7 @@ fn read_texture<R:BinReaderExt>(file:&mut crate::file::File<R>,block_id:BlockId)
pub struct StreamableMap<R:BinReaderExt>{ pub struct StreamableMap<R:BinReaderExt>{
file:crate::file::File<R>, file:crate::file::File<R>,
//this includes every platform... move the unconstrained datas to their appropriate data block? //this includes every platform... move the unconstrained datas to their appropriate data block?
modes:gameplay_modes::NormalizedModes, modes:gameplay_modes::Modes,
//this is every possible attribute... need some sort of streaming system //this is every possible attribute... need some sort of streaming system
attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>, attributes:Vec<strafesnet_common::gameplay_attributes::CollisionAttributes>,
//this is every possible render configuration... shaders and such... need streaming //this is every possible render configuration... shaders and such... need streaming
@ -223,7 +242,7 @@ impl<R:BinReaderExt> StreamableMap<R>{
} }
Ok(Self{ Ok(Self{
file, file,
modes:strafesnet_common::gameplay_modes::NormalizedModes::new(modes), modes:strafesnet_common::gameplay_modes::Modes::new(modes),
attributes, attributes,
render_configs, render_configs,
bvh:strafesnet_common::bvh::generate_bvh(bvh), bvh:strafesnet_common::bvh::generate_bvh(bvh),
@ -233,7 +252,7 @@ impl<R:BinReaderExt> StreamableMap<R>{
} }
pub fn get_intersecting_region_block_ids(&self,aabb:&Aabb)->Vec<BlockId>{ pub fn get_intersecting_region_block_ids(&self,aabb:&Aabb)->Vec<BlockId>{
let mut block_ids=Vec::new(); let mut block_ids=Vec::new();
self.bvh.sample_aabb(aabb,&mut |&block_id|block_ids.push(block_id)); self.bvh.the_tester(aabb,&mut |&block_id|block_ids.push(block_id));
block_ids block_ids
} }
pub fn load_region(&mut self,block_id:BlockId)->Result<Vec<(model::ModelId,model::Model)>,Error>{ pub fn load_region(&mut self,block_id:BlockId)->Result<Vec<(model::ModelId,model::Model)>,Error>{
@ -287,60 +306,12 @@ impl<R:BinReaderExt> StreamableMap<R>{
} }
} }
// silly redefinition of Bvh for determining the size of subnodes
// without duplicating work by running weight calculation recursion top down on every node
pub struct BvhWeightNode<W,T>{
content:RecursiveContent<BvhWeightNode<W,T>,T>,
weight:W,
aabb:Aabb,
}
impl <W,T> BvhWeightNode<W,T>{
pub const fn weight(&self)->&W{
&self.weight
}
pub const fn aabb(&self)->&Aabb{
&self.aabb
}
pub fn into_content(self)->RecursiveContent<BvhWeightNode<W,T>,T>{
self.content
}
pub fn into_visitor<F:FnMut(T)>(self,f:&mut F){
match self.content{
RecursiveContent::Leaf(model)=>f(model),
RecursiveContent::Branch(children)=>for child in children{
child.into_visitor(f)
},
}
}
}
pub fn weigh_contents<T,W:Copy+std::iter::Sum<W>,F:Fn(&T)->W>(node:BvhNode<T>,f:&F)->BvhWeightNode<W,T>{
let (content,aabb)=node.into_inner();
match content{
RecursiveContent::Leaf(model)=>BvhWeightNode{
weight:f(&model),
content:RecursiveContent::Leaf(model),
aabb,
},
RecursiveContent::Branch(children)=>{
let branch:Vec<BvhWeightNode<W,T>>=children.into_iter().map(|child|
weigh_contents(child,f)
).collect();
BvhWeightNode{
weight:branch.iter().map(|node|node.weight).sum(),
content:RecursiveContent::Branch(branch),
aabb,
}
},
}
}
const BVH_NODE_MAX_WEIGHT:usize=64*1024;//64 kB const BVH_NODE_MAX_WEIGHT:usize=64*1024;//64 kB
fn collect_spacial_blocks( fn collect_spacial_blocks(
block_location:&mut Vec<u64>, block_location:&mut Vec<u64>,
block_headers:&mut Vec<SpacialBlockHeader>, block_headers:&mut Vec<SpacialBlockHeader>,
sequential_block_data:&mut std::io::Cursor<&mut Vec<u8>>, sequential_block_data:&mut std::io::Cursor<&mut Vec<u8>>,
bvh_node:BvhWeightNode<usize,(model::ModelId,newtypes::model::Model)> bvh_node:strafesnet_common::bvh::BvhWeightNode<usize,(model::ModelId,newtypes::model::Model)>
)->Result<(),Error>{ )->Result<(),Error>{
//inspect the node weights top-down. //inspect the node weights top-down.
//When a node weighs less than the limit, //When a node weighs less than the limit,
@ -386,11 +357,11 @@ pub fn write_map<W:BinWriterExt>(mut writer:W,map:strafesnet_common::map::Comple
let mesh=map.meshes.get(model.mesh.get() as usize).ok_or(Error::InvalidMeshId(model.mesh))?; let mesh=map.meshes.get(model.mesh.get() as usize).ok_or(Error::InvalidMeshId(model.mesh))?;
let mut aabb=strafesnet_common::aabb::Aabb::default(); let mut aabb=strafesnet_common::aabb::Aabb::default();
for &pos in &mesh.unique_pos{ for &pos in &mesh.unique_pos{
aabb.grow(model.transform.transform_point3(pos).narrow_1().unwrap()); aabb.grow(model.transform.transform_point3(pos).fix_1());
} }
Ok(((model::ModelId::new(model_id as u32),model.into()),aabb)) Ok(((model::ModelId::new(model_id as u32),model.into()),aabb))
}).collect::<Result<Vec<_>,_>>()?; }).collect::<Result<Vec<_>,_>>()?;
let bvh=weigh_contents(strafesnet_common::bvh::generate_bvh(boxen),&|_|std::mem::size_of::<newtypes::model::Model>()); let bvh=strafesnet_common::bvh::generate_bvh(boxen).weigh_contents(&|_|std::mem::size_of::<newtypes::model::Model>());
//build blocks //build blocks
//block location is initialized with two values //block location is initialized with two values
//the first value represents the location of the first byte after the file header //the first value represents the location of the first byte after the file header
@ -430,36 +401,38 @@ pub fn write_map<W:BinWriterExt>(mut writer:W,map:strafesnet_common::map::Comple
num_spacial_blocks:spacial_blocks.len() as u32, num_spacial_blocks:spacial_blocks.len() as u32,
num_resource_blocks:resource_blocks.len() as u32, num_resource_blocks:resource_blocks.len() as u32,
//num_resources_external:0, //num_resources_external:0,
num_modes:map.modes.len() as u32, num_modes:map.modes.modes.len() as u32,
num_attributes:map.attributes.len() as u32, num_attributes:map.attributes.len() as u32,
num_render_configs:map.render_configs.len() as u32, num_render_configs:map.render_configs.len() as u32,
spacial_blocks, spacial_blocks,
resource_blocks, resource_blocks,
//external_resources:Vec::new(), //external_resources:Vec::new(),
modes:map.modes.into_iter().map(Into::into).collect(), modes:map.modes.modes.into_iter().map(Into::into).collect(),
attributes:map.attributes.into_iter().map(Into::into).collect(), attributes:map.attributes.into_iter().map(Into::into).collect(),
render_configs:map.render_configs.into_iter().map(Into::into).collect(), render_configs:map.render_configs.into_iter().map(Into::into).collect(),
}; };
//probe header length let mut file_header=crate::file::Header{
let mut map_header_data=Vec::new();
binrw::BinWrite::write_le(&map_header,&mut std::io::Cursor::new(&mut map_header_data)).map_err(Error::InvalidData)?;
// calculate final file header
let mut offset=crate::file::Header::calculate_size(block_count) as u64;
offset+=map_header_data.len() as u64;
// priming includes map header
let priming=offset;
for position in &mut block_location{
*position+=offset;
}
let file_header=crate::file::Header{
fourcc:crate::file::FourCC::Map, fourcc:crate::file::FourCC::Map,
version:0, version:0,
priming, priming:0,
resource:0, resource:0,
block_count, block_count,
block_location, block_location,
}; };
//probe header length
let mut file_header_data=Vec::new();
binrw::BinWrite::write_le(&file_header,&mut std::io::Cursor::new(&mut file_header_data)).map_err(Error::InvalidData)?;
let mut map_header_data=Vec::new();
binrw::BinWrite::write_le(&map_header,&mut std::io::Cursor::new(&mut map_header_data)).map_err(Error::InvalidData)?;
//update file header according to probe data
let mut offset=file_header_data.len() as u64;
file_header.priming=offset;
file_header.block_location[0]=offset;
offset+=map_header_data.len() as u64;
for position in &mut file_header.block_location[1..]{
*position+=offset;
}
//write (updated) file header //write (updated) file header
writer.write_le(&file_header).map_err(Error::InvalidData)?; writer.write_le(&file_header).map_err(Error::InvalidData)?;

@ -1,9 +1,7 @@
mod common; mod common;
pub mod aabb; pub mod aabb;
pub mod model; pub mod model;
pub mod mouse;
pub mod integer; pub mod integer;
pub mod physics;
pub mod gameplay_modes; pub mod gameplay_modes;
pub mod gameplay_style; pub mod gameplay_style;
pub mod gameplay_attributes; pub mod gameplay_attributes;

@ -1,9 +1,3 @@
pub const fn flag(b:bool,mask:u8)->u8{ pub const fn flag(b:bool,mask:u8)->u8{
(-(b as i8) as u8)&mask (-(b as i8) as u8)&mask
} }
pub fn bool_from_u8(value:u8)->bool{
value!=0
}
pub fn bool_into_u8(value:&bool)->u8{
*value as u8
}

@ -157,7 +157,7 @@ pub struct ModeHeader{
} }
#[binrw::binrw] #[binrw::binrw]
#[brw(little)] #[brw(little)]
pub struct NormalizedMode{ pub struct Mode{
pub header:ModeHeader, pub header:ModeHeader,
pub style:super::gameplay_style::StyleModifiers, pub style:super::gameplay_style::StyleModifiers,
pub start:u32, pub start:u32,
@ -179,10 +179,10 @@ impl std::fmt::Display for ModeError{
} }
} }
impl std::error::Error for ModeError{} impl std::error::Error for ModeError{}
impl TryInto<strafesnet_common::gameplay_modes::NormalizedMode> for NormalizedMode{ impl TryInto<strafesnet_common::gameplay_modes::Mode> for Mode{
type Error=ModeError; type Error=ModeError;
fn try_into(self)->Result<strafesnet_common::gameplay_modes::NormalizedMode,Self::Error>{ fn try_into(self)->Result<strafesnet_common::gameplay_modes::Mode,Self::Error>{
Ok(strafesnet_common::gameplay_modes::NormalizedMode::new(strafesnet_common::gameplay_modes::Mode::new( Ok(strafesnet_common::gameplay_modes::Mode::new(
self.style.try_into().map_err(ModeError::StyleModifier)?, self.style.try_into().map_err(ModeError::StyleModifier)?,
strafesnet_common::model::ModelId::new(self.start), strafesnet_common::model::ModelId::new(self.start),
self.zones.into_iter().map(|(model_id,zone)| self.zones.into_iter().map(|(model_id,zone)|
@ -192,12 +192,12 @@ impl TryInto<strafesnet_common::gameplay_modes::NormalizedMode> for NormalizedMo
self.elements.into_iter().map(|(model_id,stage_element)| self.elements.into_iter().map(|(model_id,stage_element)|
Ok((strafesnet_common::model::ModelId::new(model_id),stage_element.try_into()?)) Ok((strafesnet_common::model::ModelId::new(model_id),stage_element.try_into()?))
).collect::<Result<_,_>>().map_err(ModeError::StageElement)?, ).collect::<Result<_,_>>().map_err(ModeError::StageElement)?,
))) ))
} }
} }
impl From<strafesnet_common::gameplay_modes::NormalizedMode> for NormalizedMode{ impl From<strafesnet_common::gameplay_modes::Mode> for Mode{
fn from(value:strafesnet_common::gameplay_modes::NormalizedMode)->Self{ fn from(value:strafesnet_common::gameplay_modes::Mode)->Self{
let (style,start,zones,stages,elements)=value.into_inner().into_inner(); let (style,start,zones,stages,elements)=value.into_inner();
Self{ Self{
header:ModeHeader{ header:ModeHeader{
zones:zones.len() as u32, zones:zones.len() as u32,

@ -38,23 +38,6 @@ pub struct Ratio64Vec2{
pub x:Ratio64, pub x:Ratio64,
pub y:Ratio64, pub y:Ratio64,
} }
impl TryInto<strafesnet_common::integer::Ratio64Vec2> for Ratio64Vec2{
type Error=RatioError;
fn try_into(self)->Result<strafesnet_common::integer::Ratio64Vec2,Self::Error>{
Ok(strafesnet_common::integer::Ratio64Vec2{
x:self.x.try_into()?,
y:self.y.try_into()?,
})
}
}
impl From<strafesnet_common::integer::Ratio64Vec2> for Ratio64Vec2{
fn from(value:strafesnet_common::integer::Ratio64Vec2)->Self{
Self{
x:value.x.into(),
y:value.y.into(),
}
}
}
pub type Angle32=i32; pub type Angle32=i32;
pub type Planar64=i64; pub type Planar64=i64;

@ -1,25 +0,0 @@
use super::integer::Time;
#[binrw::binrw]
#[brw(little)]
pub struct MouseState{
pub pos:[i32;2],
pub time:Time,
}
impl<T> Into<strafesnet_common::mouse::MouseState<T>> for MouseState{
fn into(self)->strafesnet_common::mouse::MouseState<T>{
strafesnet_common::mouse::MouseState{
pos:self.pos.into(),
time:strafesnet_common::integer::Time::raw(self.time),
}
}
}
impl<T> From<strafesnet_common::mouse::MouseState<T>> for MouseState{
fn from(value:strafesnet_common::mouse::MouseState<T>)->Self{
Self{
pos:value.pos.to_array(),
time:value.time.get(),
}
}
}

@ -1,156 +0,0 @@
use super::integer::Time;
use super::common::{bool_from_u8,bool_into_u8};
type TimedPhysicsInstruction=strafesnet_common::instruction::TimedInstruction<strafesnet_common::physics::Instruction,strafesnet_common::physics::Time>;
#[binrw::binrw]
#[brw(little)]
pub struct TimedInstruction{
pub time:Time,
pub instruction:Instruction,
}
impl TryInto<TimedPhysicsInstruction> for TimedInstruction{
type Error=super::integer::RatioError;
fn try_into(self)->Result<TimedPhysicsInstruction,Self::Error>{
Ok(strafesnet_common::instruction::TimedInstruction{
time:strafesnet_common::integer::Time::raw(self.time),
instruction:self.instruction.try_into()?,
})
}
}
impl TryFrom<TimedPhysicsInstruction> for TimedInstruction{
type Error=super::physics::InstructionConvert;
fn try_from(value:TimedPhysicsInstruction)->Result<Self,Self::Error>{
Ok(Self{
time:value.time.get(),
instruction:value.instruction.try_into()?,
})
}
}
#[binrw::binrw]
#[brw(little)]
pub enum Instruction{
#[brw(magic=0u8)]
ReplaceMouse{
m0:super::mouse::MouseState,
m1:super::mouse::MouseState
},
#[brw(magic=1u8)]
SetNextMouse(super::mouse::MouseState),
#[brw(magic=2u8)]
SetMoveRight(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=3u8)]
SetMoveUp(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=4u8)]
SetMoveBack(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=5u8)]
SetMoveLeft(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=6u8)]
SetMoveDown(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=7u8)]
SetMoveForward(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=8u8)]
SetJump(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=9u8)]
SetZoom(
#[br(map=bool_from_u8)]
#[bw(map=bool_into_u8)]
bool),
#[brw(magic=10u8)]
Reset,
#[brw(magic=11u8)]
Restart(super::gameplay_modes::ModeId),
#[brw(magic=12u8)]
Spawn(super::gameplay_modes::ModeId,super::gameplay_modes::StageId),
#[brw(magic=13u8)]
PracticeFly,
#[brw(magic=14u8)]
SetSensitivity(super::integer::Ratio64Vec2),
#[brw(magic=255u8)]
Idle,
}
#[derive(Debug)]
pub enum InstructionConvert{
/// This is an instruction that can be dropped when serializing
DropInstruction,
}
impl std::fmt::Display for InstructionConvert{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for InstructionConvert{}
impl TryInto<strafesnet_common::physics::Instruction> for Instruction{
type Error=super::integer::RatioError;
fn try_into(self)->Result<strafesnet_common::physics::Instruction,Self::Error>{
Ok(match self{
Instruction::ReplaceMouse{m0,m1}=>strafesnet_common::physics::Instruction::Mouse(strafesnet_common::physics::MouseInstruction::ReplaceMouse{m0:m0.into(),m1:m1.into()}),
Instruction::SetNextMouse(m)=>strafesnet_common::physics::Instruction::Mouse(strafesnet_common::physics::MouseInstruction::SetNextMouse(m.into())),
Instruction::SetMoveRight(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveRight(state.into())),
Instruction::SetMoveUp(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveUp(state.into())),
Instruction::SetMoveBack(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveBack(state.into())),
Instruction::SetMoveLeft(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveLeft(state.into())),
Instruction::SetMoveDown(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveDown(state.into())),
Instruction::SetMoveForward(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveForward(state.into())),
Instruction::SetJump(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetJump(state.into())),
Instruction::SetZoom(state)=>strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetZoom(state.into())),
Instruction::Reset=>strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Reset),
Instruction::Restart(mode_id)=>strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Restart(strafesnet_common::gameplay_modes::ModeId::new(mode_id))),
Instruction::Spawn(mode_id,stage_id)=>strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Spawn(
strafesnet_common::gameplay_modes::ModeId::new(mode_id),
strafesnet_common::gameplay_modes::StageId::new(stage_id),
)),
Instruction::PracticeFly=>strafesnet_common::physics::Instruction::Misc(strafesnet_common::physics::MiscInstruction::PracticeFly),
Instruction::SetSensitivity(sensitivity)=>strafesnet_common::physics::Instruction::Misc(strafesnet_common::physics::MiscInstruction::SetSensitivity(sensitivity.try_into()?)),
Instruction::Idle=>strafesnet_common::physics::Instruction::Idle,
})
}
}
impl TryFrom<strafesnet_common::physics::Instruction> for Instruction{
type Error=InstructionConvert;
fn try_from(value:strafesnet_common::physics::Instruction)->Result<Self,Self::Error>{
match value{
strafesnet_common::physics::Instruction::Mouse(strafesnet_common::physics::MouseInstruction::ReplaceMouse{m0,m1})=>Ok(Instruction::ReplaceMouse{m0:m0.into(),m1:m1.into()}),
strafesnet_common::physics::Instruction::Mouse(strafesnet_common::physics::MouseInstruction::SetNextMouse(m))=>Ok(Instruction::SetNextMouse(m.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveRight(state))=>Ok(Instruction::SetMoveRight(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveUp(state))=>Ok(Instruction::SetMoveUp(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveBack(state))=>Ok(Instruction::SetMoveBack(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveLeft(state))=>Ok(Instruction::SetMoveLeft(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveDown(state))=>Ok(Instruction::SetMoveDown(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetMoveForward(state))=>Ok(Instruction::SetMoveForward(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetJump(state))=>Ok(Instruction::SetJump(state.into())),
strafesnet_common::physics::Instruction::SetControl(strafesnet_common::physics::SetControlInstruction::SetZoom(state))=>Ok(Instruction::SetZoom(state.into())),
strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Reset)=>Ok(Instruction::Reset),
strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Restart(mode_id))=>Ok(Instruction::Restart(mode_id.get())),
strafesnet_common::physics::Instruction::Mode(strafesnet_common::physics::ModeInstruction::Spawn(mode_id,stage_id))=>Ok(Instruction::Spawn(
mode_id.get(),
stage_id.get(),
)),
strafesnet_common::physics::Instruction::Misc(strafesnet_common::physics::MiscInstruction::PracticeFly)=>Ok(Instruction::PracticeFly),
strafesnet_common::physics::Instruction::Misc(strafesnet_common::physics::MiscInstruction::SetSensitivity(sensitivity))=>Ok(Instruction::SetSensitivity(sensitivity.into())),
strafesnet_common::physics::Instruction::Idle=>Ok(Instruction::Idle),
}
}
}

@ -1,38 +0,0 @@
[package]
name = "map-tool"
version = "1.7.0"
edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.75"
clap = { version = "4.4.2", features = ["derive"] }
flate2 = "1.0.27"
futures = "0.3.31"
image = "0.25.2"
image_dds = "0.7.1"
lazy-regex = "3.1.0"
rbx_asset = { version = "0.2.5", registry = "strafesnet" }
rbx_binary = { version = "0.7.4", registry = "strafesnet" }
rbx_dom_weak = { version = "2.7.0", registry = "strafesnet" }
rbx_reflection_database = { version = "0.2.10", registry = "strafesnet" }
rbx_xml = { version = "0.13.3", registry = "strafesnet" }
rbxassetid = { version = "0.1.0", registry = "strafesnet" }
strafesnet_bsp_loader = { version = "0.3.0", path = "../lib/bsp_loader", registry = "strafesnet" }
strafesnet_deferred_loader = { version = "0.5.0", path = "../lib/deferred_loader", registry = "strafesnet" }
strafesnet_rbx_loader = { version = "0.6.0", path = "../lib/rbx_loader", registry = "strafesnet" }
strafesnet_snf = { version = "0.3.0", path = "../lib/snf", registry = "strafesnet" }
thiserror = "2.0.11"
tokio = { version = "1.43.0", features = ["macros", "rt-multi-thread", "fs"] }
vbsp = "0.8.0"
vbsp-entities-css = "0.6.0"
vmdl = "0.2.0"
vmt-parser = "0.2.0"
vpk = "0.3.0"
vtf = "0.3.0"
#[profile.release]
#lto = true
#strip = true
#codegen-units = 1

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

@ -1,2 +0,0 @@
# map-tool

@ -1,30 +0,0 @@
mod roblox;
mod source;
use clap::{Parser,Subcommand};
use anyhow::Result as AResult;
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
#[command(propagate_version = true)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands{
#[command(flatten)]
Roblox(roblox::Commands),
#[command(flatten)]
Source(source::Commands),
}
#[tokio::main]
async fn main()->AResult<()>{
let cli=Cli::parse();
match cli.command{
Commands::Roblox(commands)=>commands.run().await,
Commands::Source(commands)=>commands.run().await,
}
}

@ -1,431 +0,0 @@
use std::path::{Path,PathBuf};
use std::io::{Cursor,Read,Seek};
use std::collections::HashSet;
use clap::{Args,Subcommand};
use anyhow::Result as AResult;
use rbx_dom_weak::Instance;
use strafesnet_deferred_loader::deferred_loader::LoadFailureMode;
use rbxassetid::RobloxAssetId;
use tokio::io::AsyncReadExt;
const DOWNLOAD_LIMIT:usize=16;
#[derive(Subcommand)]
pub enum Commands{
RobloxToSNF(RobloxToSNFSubcommand),
DownloadAssets(DownloadAssetsSubcommand),
}
#[derive(Args)]
pub struct RobloxToSNFSubcommand {
#[arg(long)]
output_folder:PathBuf,
#[arg(required=true)]
input_files:Vec<PathBuf>,
}
#[derive(Args)]
pub struct DownloadAssetsSubcommand{
#[arg(required=true)]
roblox_files:Vec<PathBuf>,
// #[arg(long)]
// cookie_file:Option<String>,
}
impl Commands{
pub async fn run(self)->AResult<()>{
match self{
Commands::RobloxToSNF(subcommand)=>roblox_to_snf(subcommand.input_files,subcommand.output_folder).await,
Commands::DownloadAssets(subcommand)=>download_assets(
subcommand.roblox_files,
rbx_asset::cookie::Cookie::new("".to_string()),
).await,
}
}
}
#[allow(unused)]
#[derive(Debug)]
enum LoadDomError{
IO(std::io::Error),
Binary(rbx_binary::DecodeError),
Xml(rbx_xml::DecodeError),
UnknownFormat,
}
fn load_dom<R:Read+Seek>(mut input:R)->Result<rbx_dom_weak::WeakDom,LoadDomError>{
let mut first_8=[0u8;8];
input.read_exact(&mut first_8).map_err(LoadDomError::IO)?;
input.rewind().map_err(LoadDomError::IO)?;
match &first_8{
b"<roblox!"=>rbx_binary::from_reader(input).map_err(LoadDomError::Binary),
b"<roblox "=>rbx_xml::from_reader(input,rbx_xml::DecodeOptions::default()).map_err(LoadDomError::Xml),
_=>Err(LoadDomError::UnknownFormat),
}
}
/* The ones I'm interested in:
Beam.Texture
Decal.Texture
FileMesh.MeshId
FileMesh.TextureId
MaterialVariant.ColorMap
MaterialVariant.MetalnessMap
MaterialVariant.NormalMap
MaterialVariant.RoughnessMap
MeshPart.MeshId
MeshPart.TextureID
ParticleEmitter.Texture
Sky.MoonTextureId
Sky.SkyboxBk
Sky.SkyboxDn
Sky.SkyboxFt
Sky.SkyboxLf
Sky.SkyboxRt
Sky.SkyboxUp
Sky.SunTextureId
SurfaceAppearance.ColorMap
SurfaceAppearance.MetalnessMap
SurfaceAppearance.NormalMap
SurfaceAppearance.RoughnessMap
SurfaceAppearance.TexturePack
*/
fn accumulate_content_id(content_list:&mut HashSet<RobloxAssetId>,object:&Instance,property:&str){
if let Some(rbx_dom_weak::types::Variant::Content(content))=object.properties.get(property){
let url:&str=content.as_ref();
if let Ok(asset_id)=url.parse(){
content_list.insert(asset_id);
}else{
println!("Content failed to parse into AssetID: {:?}",content);
}
}else{
println!("property={} does not exist for class={}",property,object.class.as_str());
}
}
async fn read_entire_file(path:impl AsRef<Path>)->Result<Cursor<Vec<u8>>,std::io::Error>{
let mut file=tokio::fs::File::open(path).await?;
let mut data=Vec::new();
file.read_to_end(&mut data).await?;
Ok(Cursor::new(data))
}
#[derive(Default)]
struct UniqueAssets{
meshes:HashSet<RobloxAssetId>,
unions:HashSet<RobloxAssetId>,
textures:HashSet<RobloxAssetId>,
}
impl UniqueAssets{
fn collect(&mut self,object:&Instance){
match object.class.as_str(){
"Beam"=>accumulate_content_id(&mut self.textures,object,"Texture"),
"Decal"=>accumulate_content_id(&mut self.textures,object,"Texture"),
"Texture"=>accumulate_content_id(&mut self.textures,object,"Texture"),
"FileMesh"=>accumulate_content_id(&mut self.textures,object,"TextureId"),
"MeshPart"=>{
accumulate_content_id(&mut self.textures,object,"TextureID");
accumulate_content_id(&mut self.meshes,object,"MeshId");
},
"SpecialMesh"=>accumulate_content_id(&mut self.meshes,object,"MeshId"),
"ParticleEmitter"=>accumulate_content_id(&mut self.textures,object,"Texture"),
"Sky"=>{
accumulate_content_id(&mut self.textures,object,"MoonTextureId");
accumulate_content_id(&mut self.textures,object,"SkyboxBk");
accumulate_content_id(&mut self.textures,object,"SkyboxDn");
accumulate_content_id(&mut self.textures,object,"SkyboxFt");
accumulate_content_id(&mut self.textures,object,"SkyboxLf");
accumulate_content_id(&mut self.textures,object,"SkyboxRt");
accumulate_content_id(&mut self.textures,object,"SkyboxUp");
accumulate_content_id(&mut self.textures,object,"SunTextureId");
},
"UnionOperation"=>accumulate_content_id(&mut self.unions,object,"AssetId"),
_=>(),
}
}
}
#[allow(unused)]
#[derive(Debug)]
enum UniqueAssetError{
IO(std::io::Error),
LoadDom(LoadDomError),
}
async fn unique_assets(path:&Path)->Result<UniqueAssets,UniqueAssetError>{
// read entire file
let mut assets=UniqueAssets::default();
let data=read_entire_file(path).await.map_err(UniqueAssetError::IO)?;
let dom=load_dom(data).map_err(UniqueAssetError::LoadDom)?;
for object in dom.into_raw().1.into_values(){
assets.collect(&object);
}
Ok(assets)
}
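// Each download type has its own on-disk cache directory; raw texture downloads land in downloaded_textures/ and are converted to DDS files under textures/ by convert_texture.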
enum DownloadType{
Texture(RobloxAssetId),
Mesh(RobloxAssetId),
Union(RobloxAssetId),
}
impl DownloadType{
fn path(&self)->PathBuf{
match self{
DownloadType::Texture(asset_id)=>format!("downloaded_textures/{}",asset_id.0).into(),
DownloadType::Mesh(asset_id)=>format!("meshes/{}",asset_id.0).into(),
DownloadType::Union(asset_id)=>format!("unions/{}",asset_id.0).into(),
}
}
fn asset_id(&self)->u64{
match self{
DownloadType::Texture(asset_id)=>asset_id.0,
DownloadType::Mesh(asset_id)=>asset_id.0,
DownloadType::Union(asset_id)=>asset_id.0,
}
}
}
enum DownloadResult{
Cached(PathBuf),
Data(Vec<u8>),
Failed,
}
#[derive(Default,Debug)]
struct Stats{
total_assets:u32,
cached_assets:u32,
downloaded_assets:u32,
failed_downloads:u32,
timed_out_downloads:u32,
}
async fn download_retry(stats:&mut Stats,context:&rbx_asset::cookie::CookieContext,download_instruction:DownloadType)->Result<DownloadResult,std::io::Error>{
stats.total_assets+=1;
// check if file exists on disk
let path=download_instruction.path();
if tokio::fs::try_exists(path.as_path()).await?{
stats.cached_assets+=1;
return Ok(DownloadResult::Cached(path));
}
let asset_id=download_instruction.asset_id();
// if not, download file
let mut retry=0;
const BACKOFF_MUL:f32=1.3956124250860895286;//exp(1/3)
let mut backoff=1000f32;
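// exponential backoff against the 429 rate limit: sleeps start at 1s and grow by exp(1/3) per retry, roughly 135s of waiting across the 12 retries before giving up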
loop{
let asset_result=context.get_asset(rbx_asset::cookie::GetAssetRequest{
asset_id,
version:None,
}).await;
match asset_result{
Ok(asset_result)=>{
stats.downloaded_assets+=1;
tokio::fs::write(path,&asset_result).await?;
break Ok(DownloadResult::Data(asset_result));
},
Err(rbx_asset::cookie::GetError::Response(rbx_asset::ResponseError::StatusCodeWithUrlAndBody(scwuab)))=>{
if scwuab.status_code.as_u16()==429{
if retry==12{
println!("Giving up asset download {asset_id}");
stats.timed_out_downloads+=1;
break Ok(DownloadResult::Failed);
}
println!("Hit roblox rate limit, waiting {:.0}ms...",backoff);
tokio::time::sleep(std::time::Duration::from_millis(backoff as u64)).await;
backoff*=BACKOFF_MUL;
retry+=1;
}else{
stats.failed_downloads+=1;
println!("weird scuwab error: {scwuab:?}");
break Ok(DownloadResult::Failed);
}
},
Err(e)=>{
stats.failed_downloads+=1;
println!("sadly error: {e}");
break Ok(DownloadResult::Failed);
},
}
}
}
#[derive(Debug,thiserror::Error)]
enum ConvertTextureError{
#[error("Io error {0:?}")]
Io(#[from]std::io::Error),
#[error("Image error {0:?}")]
Image(#[from]image::ImageError),
#[error("DDS create error {0:?}")]
DDS(#[from]image_dds::CreateDdsError),
#[error("DDS write error {0:?}")]
DDSWrite(#[from]image_dds::ddsfile::Error),
}
async fn convert_texture(asset_id:RobloxAssetId,download_result:DownloadResult)->Result<(),ConvertTextureError>{
let data=match download_result{
DownloadResult::Cached(path)=>tokio::fs::read(path).await?,
DownloadResult::Data(data)=>data,
DownloadResult::Failed=>return Ok(()),
};
// the container format (PNG, JPEG, etc.) is detected automatically by load_from_memory
let image=image::load_from_memory(&data)?.to_rgba8();
// pick format: BC7 block compression requires dimensions that are a multiple of 4,
// so fall back to uncompressed RGBA8 otherwise
let format=if image.width()%4!=0||image.height()%4!=0{
image_dds::ImageFormat::Rgba8UnormSrgb
}else{
image_dds::ImageFormat::BC7RgbaUnormSrgb
};
let dds=image_dds::dds_from_image(
&image,
format,
image_dds::Quality::Slow,
image_dds::Mipmaps::GeneratedAutomatic,
)?;
let file_name=format!("textures/{}.dds",asset_id.0);
let mut file=std::fs::File::create(file_name)?;
dds.write(&mut file)?;
Ok(())
}
async fn download_assets(paths:Vec<PathBuf>,cookie:rbx_asset::cookie::Cookie)->AResult<()>{
tokio::try_join!(
tokio::fs::create_dir_all("downloaded_textures"),
tokio::fs::create_dir_all("textures"),
tokio::fs::create_dir_all("meshes"),
tokio::fs::create_dir_all("unions"),
)?;
// fan the work out over two mpsc channels: decoded per-file assets, and downloaded textures awaiting conversion
let thread_limit=std::thread::available_parallelism()?.get();
let (send_assets,mut recv_assets)=tokio::sync::mpsc::channel(DOWNLOAD_LIMIT);
let (send_texture,mut recv_texture)=tokio::sync::mpsc::channel(thread_limit);
// map decode dispatcher:
// read the input files concurrently and produce a UniqueAssets result per file
tokio::spawn(async move{
// move send so it gets dropped when all maps have been decoded
// closing the channel
let mut it=paths.into_iter();
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
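// bounded-spawn pattern: the semaphore is const-initialized with 0 permits so it can live in a static,
// then sized from the runtime thread count; each decode task holds one permit for its lifetime,
// so at most thread_limit files are read and parsed concurrently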
while let (Ok(permit),Some(path))=(SEM.acquire().await,it.next()){
let send=send_assets.clone();
tokio::spawn(async move{
let result=unique_assets(path.as_path()).await;
_=send.send(result).await;
drop(permit);
});
}
});
// download manager
// insert into global unique assets guy
// add to download queue if the asset is globally unique and does not already exist on disk
let mut stats=Stats::default();
let context=rbx_asset::cookie::CookieContext::new(cookie);
let mut globally_unique_assets=UniqueAssets::default();
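// globally_unique_assets deduplicates asset ids across every input map, so each texture/mesh/union is downloaded at most once; only textures get forwarded for DDS conversion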
// sketch of a rate-limit-aware scheduler (not implemented below; downloads currently run one at a time):
// pop a job = retry_queue.pop_front() or ingest(recv.recv().await)
// SLOW MODE:
// acquire all permits
// drop all permits
// pop one job
// if it succeeds go into fast mode
// FAST MODE:
// acquire one permit
// pop a job
let download_thread=tokio::spawn(async move{
while let Some(result)=recv_assets.recv().await{
let unique_assets=match result{
Ok(unique_assets)=>unique_assets,
Err(e)=>{
println!("error: {e:?}");
continue;
},
};
for texture_id in unique_assets.textures{
if globally_unique_assets.textures.insert(texture_id){
let data=download_retry(&mut stats,&context,DownloadType::Texture(texture_id)).await?;
send_texture.send((texture_id,data)).await?;
}
}
for mesh_id in unique_assets.meshes{
if globally_unique_assets.meshes.insert(mesh_id){
download_retry(&mut stats,&context,DownloadType::Mesh(mesh_id)).await?;
}
}
for union_id in unique_assets.unions{
if globally_unique_assets.unions.insert(union_id){
download_retry(&mut stats,&context,DownloadType::Union(union_id)).await?;
}
}
}
dbg!(stats);
Ok::<(),anyhow::Error>(())
});
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
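// spawn one convert_texture task per downloaded texture, at most thread_limit in flight; once the download thread closes the channel, acquire_many below blocks until every outstanding conversion has returned its permit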
while let (Ok(permit),Some((asset_id,download_result)))=(SEM.acquire().await,recv_texture.recv().await){
tokio::spawn(async move{
let result=convert_texture(asset_id,download_result).await;
drop(permit);
result.unwrap();
});
}
download_thread.await??;
_=SEM.acquire_many(thread_limit as u32).await.unwrap();
Ok(())
}
#[derive(Debug)]
#[allow(dead_code)]
enum ConvertError{
IO(std::io::Error),
SNFMap(strafesnet_snf::map::Error),
RobloxRead(strafesnet_rbx_loader::ReadError),
RobloxLoad(strafesnet_rbx_loader::LoadError),
}
impl std::fmt::Display for ConvertError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for ConvertError{}
async fn convert_to_snf(path:&Path,output_folder:PathBuf)->AResult<()>{
let entire_file=tokio::fs::read(path).await?;
let model=strafesnet_rbx_loader::read(
std::io::Cursor::new(entire_file)
).map_err(ConvertError::RobloxRead)?;
let mut place=model.into_place();
place.run_scripts();
let map=place.to_snf(LoadFailureMode::DefaultToNone).map_err(ConvertError::RobloxLoad)?;
let mut dest=output_folder;
dest.push(path.file_stem().unwrap());
dest.set_extension("snfm");
let file=std::fs::File::create(dest).map_err(ConvertError::IO)?;
strafesnet_snf::map::write_map(file,map).map_err(ConvertError::SNFMap)?;
Ok(())
}
async fn roblox_to_snf(paths:Vec<std::path::PathBuf>,output_folder:PathBuf)->AResult<()>{
let start=std::time::Instant::now();
let thread_limit=std::thread::available_parallelism()?.get();
let mut it=paths.into_iter();
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
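// same bounded-spawn pattern as download_assets: one permit per in-flight conversion, acquire_many at the end waits for the stragglers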
while let (Ok(permit),Some(path))=(SEM.acquire().await,it.next()){
let output_folder=output_folder.clone();
tokio::spawn(async move{
let result=convert_to_snf(path.as_path(),output_folder).await;
drop(permit);
match result{
Ok(())=>(),
Err(e)=>println!("Convert error: {e:?}"),
}
});
}
_=SEM.acquire_many(thread_limit as u32).await.unwrap();
println!("elapsed={:?}", start.elapsed());
Ok(())
}

@ -1,514 +0,0 @@
use std::path::{Path,PathBuf};
use std::borrow::Cow;
use clap::{Args,Subcommand};
use anyhow::Result as AResult;
use futures::StreamExt;
use strafesnet_bsp_loader::loader::BspFinder;
use strafesnet_deferred_loader::loader::Loader;
use strafesnet_deferred_loader::deferred_loader::{LoadFailureMode,MeshDeferredLoader,RenderConfigDeferredLoader};
use vbsp_entities_css::Entity;
#[derive(Subcommand)]
pub enum Commands{
SourceToSNF(SourceToSNFSubcommand),
ExtractTextures(ExtractTexturesSubcommand),
VPKContents(VPKContentsSubcommand),
BSPContents(BSPContentsSubcommand),
}
#[derive(Args)]
pub struct SourceToSNFSubcommand {
#[arg(long)]
output_folder:PathBuf,
#[arg(required=true)]
input_files:Vec<PathBuf>,
#[arg(long)]
vpks:Vec<PathBuf>,
}
#[derive(Args)]
pub struct ExtractTexturesSubcommand{
#[arg(required=true)]
bsp_files:Vec<PathBuf>,
#[arg(long)]
vpks:Vec<PathBuf>,
}
#[derive(Args)]
pub struct VPKContentsSubcommand {
#[arg(long)]
input_file:PathBuf,
}
#[derive(Args)]
pub struct BSPContentsSubcommand {
#[arg(long)]
input_file:PathBuf,
}
impl Commands{
pub async fn run(self)->AResult<()>{
match self{
Commands::SourceToSNF(subcommand)=>source_to_snf(subcommand.input_files,subcommand.output_folder,subcommand.vpks).await,
Commands::ExtractTextures(subcommand)=>extract_textures(subcommand.bsp_files,subcommand.vpks).await,
Commands::VPKContents(subcommand)=>vpk_contents(subcommand.input_file),
Commands::BSPContents(subcommand)=>bsp_contents(subcommand.input_file),
}
}
}
enum VMTContent{
VMT(String),
VTF(String),
Patch(vmt_parser::material::PatchMaterial),
Unsupported,//don't want to deal with whatever vmt variant
Unresolved,//could not locate a texture because of vmt content
}
impl VMTContent{
fn vtf(opt:Option<String>)->Self{
match opt{
Some(s)=>Self::VTF(s),
None=>Self::Unresolved,
}
}
}
fn get_some_texture(material:vmt_parser::material::Material)->VMTContent{
//just grab some texture from somewhere for now
match material{
vmt_parser::material::Material::LightMappedGeneric(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::VertexLitGeneric(mat)=>VMTContent::vtf(mat.base_texture.or(mat.decal_texture)),//falls back to Unresolved if neither texture is present
vmt_parser::material::Material::VertexLitGenericDx6(mat)=>VMTContent::vtf(mat.base_texture.or(mat.decal_texture)),
vmt_parser::material::Material::UnlitGeneric(mat)=>VMTContent::vtf(mat.base_texture),
vmt_parser::material::Material::UnlitTwoTexture(mat)=>VMTContent::vtf(mat.base_texture),
vmt_parser::material::Material::Water(mat)=>VMTContent::vtf(mat.base_texture),
vmt_parser::material::Material::WorldVertexTransition(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::EyeRefract(mat)=>VMTContent::vtf(Some(mat.cornea_texture)),
vmt_parser::material::Material::SubRect(mat)=>VMTContent::VMT(mat.material),//recursive
vmt_parser::material::Material::Sprite(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::SpriteCard(mat)=>VMTContent::vtf(mat.base_texture),
vmt_parser::material::Material::Cable(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::Refract(mat)=>VMTContent::vtf(mat.base_texture),
vmt_parser::material::Material::Modulate(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::DecalModulate(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::Sky(mat)=>VMTContent::vtf(Some(mat.base_texture)),
vmt_parser::material::Material::Replacements(_mat)=>VMTContent::Unsupported,
vmt_parser::material::Material::Patch(mat)=>VMTContent::Patch(mat),
_=>unreachable!(),
}
}
#[derive(Debug,thiserror::Error)]
enum GetVMTError{
#[error("Bsp error {0:?}")]
Bsp(#[from]vbsp::BspError),
#[error("Utf8 error {0:?}")]
Utf8(#[from]std::str::Utf8Error),
#[error("Vdf error {0:?}")]
Vdf(#[from]vmt_parser::VdfError),
#[error("Vmt not found")]
NotFound,
}
fn get_vmt(finder:BspFinder,search_name:&str)->Result<vmt_parser::material::Material,GetVMTError>{
let vmt_data=finder.find(search_name)?.ok_or(GetVMTError::NotFound)?;
//decode vmt and then write
let vmt_str=core::str::from_utf8(&vmt_data)?;
let material=vmt_parser::from_str(vmt_str)?;
//println!("vmt material={:?}",material);
Ok(material)
}
#[derive(Debug,thiserror::Error)]
enum LoadVMTError{
#[error("Bsp error {0:?}")]
Bsp(#[from]vbsp::BspError),
#[error("GetVMT error {0:?}")]
GetVMT(#[from]GetVMTError),
#[error("FromUtf8 error {0:?}")]
FromUtf8(#[from]std::string::FromUtf8Error),
#[error("Vdf error {0:?}")]
Vdf(#[from]vmt_parser::VdfError),
#[error("Vmt unsupported")]
Unsupported,
#[error("Vmt unresolved")]
Unresolved,
#[error("Vmt not found")]
NotFound,
}
fn recursive_vmt_loader<'bsp,'vpk,'a>(finder:BspFinder<'bsp,'vpk>,material:vmt_parser::material::Material)->Result<Option<Cow<'a,[u8]>>,LoadVMTError>
where
'bsp:'a,
'vpk:'a,
{
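// VMT materials can chain: SubRect points at another VMT and Patch wraps a base material, so keep resolving until a concrete VTF path under materials/ is found or the chain dead-ends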
match get_some_texture(material){
VMTContent::VMT(mut s)=>{
s.make_ascii_lowercase();
recursive_vmt_loader(finder,get_vmt(finder,&s)?)
},
VMTContent::VTF(s)=>{
let mut texture_file_name=PathBuf::from("materials");
texture_file_name.push(s);
texture_file_name.set_extension("vtf");
texture_file_name.as_mut_os_str().make_ascii_lowercase();
Ok(finder.find(texture_file_name.to_str().unwrap())?)
},
VMTContent::Patch(mat)=>recursive_vmt_loader(finder,
mat.resolve(|search_name|{
let name_lowercase=search_name.to_lowercase();
match finder.find(&name_lowercase)?{
Some(bytes)=>Ok(String::from_utf8(bytes.into_owned())?),
None=>Err(LoadVMTError::NotFound),
}
})?
),
VMTContent::Unsupported=>Err(LoadVMTError::Unsupported),
VMTContent::Unresolved=>Err(LoadVMTError::Unresolved),
}
}
fn load_texture<'bsp,'vpk,'a>(finder:BspFinder<'bsp,'vpk>,texture_name:&str)->Result<Option<Cow<'a,[u8]>>,LoadVMTError>
where
'bsp:'a,
'vpk:'a,
{
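// resolution order: the lowercased, extension-stripped path under materials/ as-is, then a sibling .vmt resolved through recursive_vmt_loader, then a .vtf path pushed through the same loader; NotFound at any step falls through to the next candidate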
let mut texture_file_name=PathBuf::from("materials");
//lower case
texture_file_name.push(texture_name);
texture_file_name.as_mut_os_str().make_ascii_lowercase();
//strip any extension (keep the stem) so both .vmt and .vtf can be tried
let stem=texture_file_name.file_stem().unwrap().to_owned();
texture_file_name.pop();
texture_file_name.push(stem);
if let Some(stuff)=finder.find(texture_file_name.to_str().unwrap())?{
return Ok(Some(stuff));
}
// search for both vmt,vtf
let mut texture_file_name_vmt=texture_file_name.clone();
texture_file_name_vmt.set_extension("vmt");
let get_vmt_result=get_vmt(finder,texture_file_name_vmt.to_str().unwrap());
match get_vmt_result{
Ok(material)=>{
let vmt_result=recursive_vmt_loader(finder,material);
match vmt_result{
Ok(Some(stuff))=>return Ok(Some(stuff)),
Ok(None)
|Err(LoadVMTError::NotFound)
|Err(LoadVMTError::GetVMT(GetVMTError::NotFound))=>(),
Err(e)=>return Err(e),
}
},
Err(GetVMTError::NotFound)=>(),
Err(e)=>Err(e)?,
}
// try looking for vtf
let mut texture_file_name_vtf=texture_file_name.clone();
texture_file_name_vtf.set_extension("vtf");
let get_vtf_result=get_vmt(finder,texture_file_name_vtf.to_str().unwrap());
match get_vtf_result{
Ok(material)=>{
let vtf_result=recursive_vmt_loader(finder,material);
match vtf_result{
Ok(Some(stuff))=>return Ok(Some(stuff)),
Ok(None)
|Err(LoadVMTError::NotFound)
|Err(LoadVMTError::GetVMT(GetVMTError::NotFound))=>(),
Err(e)=>return Err(e),
}
},
Err(GetVMTError::NotFound)=>(),
Err(e)=>Err(e)?,
}
Ok(None)
}
#[derive(Debug,thiserror::Error)]
enum ExtractTextureError{
#[error("Io error {0:?}")]
Io(#[from]std::io::Error),
#[error("Bsp error {0:?}")]
Bsp(#[from]vbsp::BspError),
#[error("MeshLoad error {0:?}")]
MeshLoad(#[from]strafesnet_bsp_loader::loader::MeshError),
#[error("Load VMT error {0:?}")]
LoadVMT(#[from]LoadVMTError),
}
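// Walk one BSP: register render configs for every texture it names, mesh ids for static props and brush entities, then load each model to pick up its texture search paths, and finally stream every texture that resolves back over send_texture.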
async fn gimme_them_textures(path:&Path,vpk_list:&[strafesnet_bsp_loader::Vpk],send_texture:tokio::sync::mpsc::Sender<(Vec<u8>,String)>)->Result<(),ExtractTextureError>{
let bsp=vbsp::Bsp::read(tokio::fs::read(path).await?.as_ref())?;
let loader_bsp=strafesnet_bsp_loader::Bsp::new(bsp);
let bsp=loader_bsp.as_ref();
let mut texture_deferred_loader=RenderConfigDeferredLoader::new();
for texture in bsp.textures(){
texture_deferred_loader.acquire_render_config_id(Some(Cow::Borrowed(texture.name())));
}
let mut mesh_deferred_loader=MeshDeferredLoader::new();
for name in &bsp.static_props.dict.name{
mesh_deferred_loader.acquire_mesh_id(name.as_str());
}
for raw_ent in &bsp.entities{
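// brush entities either reference an internal brush model ("*N", already part of the BSP) or an external .mdl; only the external ones need to go through the mesh loader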
let model=match raw_ent.parse(){
Ok(Entity::Cycler(brush))=>brush.model,
Ok(Entity::EnvSprite(brush))=>brush.model,
Ok(Entity::FuncBreakable(brush))=>brush.model,
Ok(Entity::FuncBrush(brush))=>brush.model,
Ok(Entity::FuncButton(brush))=>brush.model,
Ok(Entity::FuncDoor(brush))=>brush.model,
Ok(Entity::FuncDoorRotating(brush))=>brush.model,
Ok(Entity::FuncIllusionary(brush))=>brush.model,
Ok(Entity::FuncMonitor(brush))=>brush.model,
Ok(Entity::FuncMovelinear(brush))=>brush.model,
Ok(Entity::FuncPhysbox(brush))=>brush.model,
Ok(Entity::FuncPhysboxMultiplayer(brush))=>brush.model,
Ok(Entity::FuncRotButton(brush))=>brush.model,
Ok(Entity::FuncRotating(brush))=>brush.model,
Ok(Entity::FuncTracktrain(brush))=>brush.model,
Ok(Entity::FuncTrain(brush))=>brush.model,
Ok(Entity::FuncWall(brush))=>brush.model,
Ok(Entity::FuncWallToggle(brush))=>brush.model,
Ok(Entity::FuncWaterAnalog(brush))=>brush.model,
Ok(Entity::PropDoorRotating(brush))=>brush.model,
Ok(Entity::PropDynamic(brush))=>brush.model,
Ok(Entity::PropDynamicOverride(brush))=>brush.model,
Ok(Entity::PropPhysics(brush))=>brush.model,
Ok(Entity::PropPhysicsMultiplayer(brush))=>brush.model,
Ok(Entity::PropPhysicsOverride(brush))=>brush.model,
Ok(Entity::PropRagdoll(brush))=>brush.model,
Ok(Entity::TriggerGravity(brush))=>brush.model,
Ok(Entity::TriggerHurt(brush))=>brush.model,
Ok(Entity::TriggerLook(brush))=>brush.model,
Ok(Entity::TriggerMultiple(brush))=>brush.model.unwrap_or_default(),
Ok(Entity::TriggerOnce(brush))=>brush.model,
Ok(Entity::TriggerProximity(brush))=>brush.model,
Ok(Entity::TriggerPush(brush))=>brush.model,
Ok(Entity::TriggerSoundscape(brush))=>brush.model,
Ok(Entity::TriggerTeleport(brush))=>brush.model.unwrap_or_default(),
Ok(Entity::TriggerVphysicsMotion(brush))=>brush.model,
Ok(Entity::TriggerWind(brush))=>brush.model,
_=>continue,
};
match model.chars().next(){
Some('*')=>(),
_=>{
mesh_deferred_loader.acquire_mesh_id(model);
},
}
}
let finder=BspFinder{
bsp:&loader_bsp,
vpks:vpk_list
};
let mut mesh_loader=strafesnet_bsp_loader::loader::ModelLoader::new(finder);
// load models and collect requested textures
for model_path in mesh_deferred_loader.into_indices(){
let model:vmdl::Model=match mesh_loader.load(model_path){
Ok(model)=>model,
Err(e)=>{
println!("Model={model_path} Load model error: {e}");
continue;
},
};
for texture in model.textures(){
for search_path in &texture.search_paths{
let mut path=PathBuf::from(search_path.as_str());
path.push(texture.name.as_str());
let path=path.to_str().unwrap().to_owned();
texture_deferred_loader.acquire_render_config_id(Some(Cow::Owned(path)));
}
}
}
for texture_path in texture_deferred_loader.into_indices(){
match load_texture(finder,&texture_path){
Ok(Some(texture))=>send_texture.send(
(texture.into_owned(),texture_path.into_owned())
).await.unwrap(),
Ok(None)=>(),
Err(e)=>println!("Texture={texture_path} Load error: {e}"),
}
}
Ok(())
}
#[derive(Debug,thiserror::Error)]
enum ConvertTextureError{
#[error("Bsp error {0:?}")]
Bsp(#[from]vbsp::BspError),
#[error("Vtf error {0:?}")]
Vtf(#[from]vtf::Error),
#[error("DDS create error {0:?}")]
DDS(#[from]image_dds::CreateDdsError),
#[error("DDS write error {0:?}")]
DDSWrite(#[from]image_dds::ddsfile::Error),
#[error("Io error {0:?}")]
Io(#[from]std::io::Error),
}
async fn convert_texture(texture:Vec<u8>,write_file_name:impl AsRef<Path>)->Result<(),ConvertTextureError>{
let image=vtf::from_bytes(&texture)?.highres_image.decode(0)?.to_rgba8();
// BC7 block compression requires dimensions that are a multiple of 4; fall back to uncompressed RGBA8 otherwise
let format=if image.width()%4!=0||image.height()%4!=0{
image_dds::ImageFormat::Rgba8UnormSrgb
}else{
image_dds::ImageFormat::BC7RgbaUnormSrgb
};
let dds=image_dds::dds_from_image(
&image,
format,
image_dds::Quality::Slow,
image_dds::Mipmaps::GeneratedAutomatic,
)?;
//write dds
let mut dest=PathBuf::from("textures");
dest.push(write_file_name);
dest.set_extension("dds");
std::fs::create_dir_all(dest.parent().unwrap())?;
let mut writer=std::io::BufWriter::new(std::fs::File::create(dest)?);
dds.write(&mut writer)?;
Ok(())
}
async fn read_vpks(vpk_paths:Vec<PathBuf>,thread_limit:usize)->Vec<strafesnet_bsp_loader::Vpk>{
futures::stream::iter(vpk_paths).map(|vpk_path|async{
// a VPK that fails to load is fatal for this tool, so unwrap both the join error and the read error here
tokio::task::spawn_blocking(move||Ok::<_,vpk::Error>(strafesnet_bsp_loader::Vpk::new(vpk::VPK::read(&vpk_path)?))).await.unwrap().unwrap()
})
.buffer_unordered(thread_limit)
.collect().await
}
async fn extract_textures(paths:Vec<PathBuf>,vpk_paths:Vec<PathBuf>)->AResult<()>{
tokio::try_join!(
tokio::fs::create_dir_all("extracted_textures"),
tokio::fs::create_dir_all("textures"),
tokio::fs::create_dir_all("meshes"),
)?;
let thread_limit=std::thread::available_parallelism()?.get();
// load vpk list and leak for static lifetime
let vpk_list:&[strafesnet_bsp_loader::Vpk]=read_vpks(vpk_paths,thread_limit).await.leak();
let (send_texture,mut recv_texture)=tokio::sync::mpsc::channel(thread_limit);
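// two-stage pipeline: the extract task decodes BSPs and streams (texture bytes, destination path) pairs over the channel, while the loop below converts each one to DDS; both stages are capped at thread_limit concurrent tasks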
let mut it=paths.into_iter();
let extract_thread=tokio::spawn(async move{
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
while let (Ok(permit),Some(path))=(SEM.acquire().await,it.next()){
let send=send_texture.clone();
tokio::spawn(async move{
let result=gimme_them_textures(&path,vpk_list,send).await;
drop(permit);
match result{
Ok(())=>(),
Err(e)=>println!("Map={path:?} Decode error: {e:?}"),
}
});
}
});
// convert images
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
while let (Ok(permit),Some((data,dest)))=(SEM.acquire().await,recv_texture.recv().await){
// TODO: dedup dest?
tokio::spawn(async move{
let result=convert_texture(data,dest).await;
drop(permit);
match result{
Ok(())=>(),
Err(e)=>println!("Convert error: {e:?}"),
}
});
}
extract_thread.await?;
_=SEM.acquire_many(thread_limit as u32).await?;
Ok(())
}
fn vpk_contents(vpk_path:PathBuf)->AResult<()>{
let vpk_index=vpk::VPK::read(&vpk_path)?;
for (label,entry) in vpk_index.tree.into_iter(){
println!("vpk label={} entry={:?}",label,entry);
}
Ok(())
}
fn bsp_contents(path:PathBuf)->AResult<()>{
let bsp=vbsp::Bsp::read(std::fs::read(path)?.as_ref())?;
for file_name in bsp.pack.into_zip().into_inner().unwrap().file_names(){
println!("file_name={:?}",file_name);
}
Ok(())
}
#[derive(Debug)]
#[allow(dead_code)]
enum ConvertError{
IO(std::io::Error),
SNFMap(strafesnet_snf::map::Error),
BspRead(strafesnet_bsp_loader::ReadError),
BspLoad(strafesnet_bsp_loader::LoadError),
}
impl std::fmt::Display for ConvertError{
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
write!(f,"{self:?}")
}
}
impl std::error::Error for ConvertError{}
async fn convert_to_snf(path:&Path,vpk_list:&[strafesnet_bsp_loader::Vpk],output_folder:PathBuf)->AResult<()>{
let entire_file=tokio::fs::read(path).await?;
let bsp=strafesnet_bsp_loader::read(
std::io::Cursor::new(entire_file)
).map_err(ConvertError::BspRead)?;
let map=bsp.to_snf(LoadFailureMode::DefaultToNone,vpk_list).map_err(ConvertError::BspLoad)?;
let mut dest=output_folder;
dest.push(path.file_stem().unwrap());
dest.set_extension("snfm");
let file=std::fs::File::create(dest).map_err(ConvertError::IO)?;
strafesnet_snf::map::write_map(file,map).map_err(ConvertError::SNFMap)?;
Ok(())
}
async fn source_to_snf(paths:Vec<std::path::PathBuf>,output_folder:PathBuf,vpk_paths:Vec<PathBuf>)->AResult<()>{
let start=std::time::Instant::now();
let thread_limit=std::thread::available_parallelism()?.get();
// load vpk list and leak for static lifetime
let vpk_list:&[strafesnet_bsp_loader::Vpk]=read_vpks(vpk_paths,thread_limit).await.leak();
let mut it=paths.into_iter();
static SEM:tokio::sync::Semaphore=tokio::sync::Semaphore::const_new(0);
SEM.add_permits(thread_limit);
while let (Ok(permit),Some(path))=(SEM.acquire().await,it.next()){
let output_folder=output_folder.clone();
tokio::spawn(async move{
let result=convert_to_snf(path.as_path(),vpk_list,output_folder).await;
drop(permit);
match result{
Ok(())=>(),
Err(e)=>println!("Convert error: {e:?}"),
}
});
}
_=SEM.acquire_many(thread_limit as u32).await.unwrap();
println!("elapsed={:?}", start.elapsed());
Ok(())
}

@ -1,7 +1,7 @@
[package] [package]
name = "strafe-client" name = "strafe-client"
version = "0.11.0" version = "0.10.5"
edition = "2024" edition = "2021"
repository = "https://git.itzana.me/StrafesNET/strafe-project" repository = "https://git.itzana.me/StrafesNET/strafe-project"
license = "Custom" license = "Custom"
description = "StrafesNET game client for bhop and surf." description = "StrafesNET game client for bhop and surf."
@ -9,24 +9,25 @@ authors = ["Rhys Lloyd <krakow20@gmail.com>"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features] [features]
user-install=[] # as opposed to portable install
default = ["snf"] default = ["snf"]
snf = ["dep:strafesnet_snf"] snf = ["dep:strafesnet_snf"]
source = ["dep:strafesnet_deferred_loader", "dep:strafesnet_bsp_loader"] source = ["dep:strafesnet_deferred_loader", "dep:strafesnet_bsp_loader"]
roblox = ["dep:strafesnet_deferred_loader", "dep:strafesnet_rbx_loader"] roblox = ["dep:strafesnet_deferred_loader", "dep:strafesnet_rbx_loader"]
[dependencies] [dependencies]
glam = "0.30.0" arrayvec = "0.7.6"
bytemuck = { version = "1.13.1", features = ["derive"] }
configparser = "3.0.2"
ddsfile = "0.5.1"
glam = "0.29.0"
id = { version = "0.1.0", registry = "strafesnet" }
parking_lot = "0.12.1" parking_lot = "0.12.1"
pollster = "0.4.0" pollster = "0.4.0"
replace_with = "0.1.7"
strafesnet_bsp_loader = { path = "../lib/bsp_loader", registry = "strafesnet", optional = true } strafesnet_bsp_loader = { path = "../lib/bsp_loader", registry = "strafesnet", optional = true }
strafesnet_common = { path = "../lib/common", registry = "strafesnet" } strafesnet_common = { path = "../lib/common", registry = "strafesnet" }
strafesnet_deferred_loader = { path = "../lib/deferred_loader", registry = "strafesnet", optional = true } strafesnet_deferred_loader = { path = "../lib/deferred_loader", features = ["legacy"], registry = "strafesnet", optional = true }
strafesnet_graphics = { path = "../engine/graphics", registry = "strafesnet" }
strafesnet_physics = { path = "../engine/physics", registry = "strafesnet" }
strafesnet_rbx_loader = { path = "../lib/rbx_loader", registry = "strafesnet", optional = true } strafesnet_rbx_loader = { path = "../lib/rbx_loader", registry = "strafesnet", optional = true }
strafesnet_session = { path = "../engine/session", registry = "strafesnet" }
strafesnet_settings = { path = "../engine/settings", registry = "strafesnet" }
strafesnet_snf = { path = "../lib/snf", registry = "strafesnet", optional = true } strafesnet_snf = { path = "../lib/snf", registry = "strafesnet", optional = true }
wgpu = "24.0.0" wgpu = "23.0.1"
winit = "0.30.7" winit = "0.30.7"

@ -1,68 +0,0 @@
use crate::window::Instruction;
use strafesnet_common::integer;
use strafesnet_common::instruction::TimedInstruction;
use strafesnet_common::session::Time as SessionTime;
pub struct App<'a>{
root_time:std::time::Instant,
window_thread:crate::compat_worker::QNWorker<'a,TimedInstruction<Instruction,SessionTime>>,
}
impl<'a> App<'a>{
pub fn new(
root_time:std::time::Instant,
window_thread:crate::compat_worker::QNWorker<'a,TimedInstruction<Instruction,SessionTime>>,
)->App<'a>{
Self{
root_time,
window_thread,
}
}
fn send_timed_instruction(&mut self,instruction:Instruction){
let time=integer::Time::from_nanos(self.root_time.elapsed().as_nanos() as i64);
self.window_thread.send(TimedInstruction{time,instruction}).unwrap();
}
}
impl winit::application::ApplicationHandler for App<'_>{
fn resumed(&mut self,_event_loop:&winit::event_loop::ActiveEventLoop){
//
}
fn window_event(
&mut self,
event_loop:&winit::event_loop::ActiveEventLoop,
_window_id:winit::window::WindowId,
event:winit::event::WindowEvent,
){
match event{
winit::event::WindowEvent::KeyboardInput{
event:winit::event::KeyEvent{
logical_key:winit::keyboard::Key::Named(winit::keyboard::NamedKey::Escape),
state:winit::event::ElementState::Pressed,
..
},
..
}
|winit::event::WindowEvent::CloseRequested=>{
event_loop.exit();
},
_=>(),
}
self.send_timed_instruction(Instruction::WindowEvent(event));
}
fn device_event(
&mut self,
_event_loop:&winit::event_loop::ActiveEventLoop,
_device_id:winit::event::DeviceId,
event:winit::event::DeviceEvent,
){
self.send_timed_instruction(Instruction::DeviceEvent(event));
}
fn about_to_wait(
&mut self,
_event_loop:&winit::event_loop::ActiveEventLoop
){
self.send_timed_instruction(Instruction::WindowEvent(winit::event::WindowEvent::RedrawRequested));
}
}

@ -31,23 +31,15 @@ impl<T> Body<T>
time, time,
} }
} }
pub const fn relative_to<'a>(&'a self,body0:&'a Body<T>)->VirtualBody<'a,T>{
//(p0,v0,a0,t0)
//(p1,v1,a1,t1)
VirtualBody{
body0,
body1:self,
}
}
pub fn extrapolated_position(&self,time:Time<T>)->Planar64Vec3{ pub fn extrapolated_position(&self,time:Time<T>)->Planar64Vec3{
let dt=time-self.time; let dt=time-self.time;
self.position self.position
+(self.velocity*dt).map(|elem|elem.divide().clamp_1()) +(self.velocity*dt).map(|elem|elem.divide().fix_1())
+self.acceleration.map(|elem|(dt*dt*elem/2).divide().clamp_1()) +self.acceleration.map(|elem|(dt*dt*elem/2).divide().fix_1())
} }
pub fn extrapolated_velocity(&self,time:Time<T>)->Planar64Vec3{ pub fn extrapolated_velocity(&self,time:Time<T>)->Planar64Vec3{
let dt=time-self.time; let dt=time-self.time;
self.velocity+(self.acceleration*dt).map(|elem|elem.divide().clamp_1()) self.velocity+(self.acceleration*dt).map(|elem|elem.divide().fix_1())
} }
pub fn advance_time(&mut self,time:Time<T>){ pub fn advance_time(&mut self,time:Time<T>){
self.position=self.extrapolated_position(time); self.position=self.extrapolated_position(time);
@ -71,12 +63,12 @@ impl<T> Body<T>
D2:Copy, D2:Copy,
Planar64:core::ops::Mul<D2,Output=N4>, Planar64:core::ops::Mul<D2,Output=N4>,
N4:integer::Divide<D2,Output=T1>, N4:integer::Divide<D2,Output=T1>,
T1:integer::Clamp<Planar64>, T1:integer::Fix<Planar64>,
{ {
// a*dt^2/2 + v*dt + p // a*dt^2/2 + v*dt + p
// (a*dt/2+v)*dt+p // (a*dt/2+v)*dt+p
(self.acceleration.map(|elem|dt*elem/2)+self.velocity).map(|elem|dt.mul_ratio(elem)) (self.acceleration.map(|elem|dt*elem/2)+self.velocity).map(|elem|dt.mul_ratio(elem))
.map(|elem|elem.divide().clamp())+self.position .map(|elem|elem.divide().fix())+self.position
} }
pub fn extrapolated_velocity_ratio_dt<Num,Den,N1,T1>(&self,dt:integer::Ratio<Num,Den>)->Planar64Vec3 pub fn extrapolated_velocity_ratio_dt<Num,Den,N1,T1>(&self,dt:integer::Ratio<Num,Den>)->Planar64Vec3
where where
@ -85,12 +77,12 @@ impl<T> Body<T>
Num:core::ops::Mul<Planar64,Output=N1>, Num:core::ops::Mul<Planar64,Output=N1>,
Planar64:core::ops::Mul<Den,Output=N1>, Planar64:core::ops::Mul<Den,Output=N1>,
N1:integer::Divide<Den,Output=T1>, N1:integer::Divide<Den,Output=T1>,
T1:integer::Clamp<Planar64>, T1:integer::Fix<Planar64>,
{ {
// a*dt + v // a*dt + v
self.acceleration.map(|elem|(dt*elem).divide().clamp())+self.velocity self.acceleration.map(|elem|(dt*elem).divide().fix())+self.velocity
} }
pub fn advance_time_ratio_dt(&mut self,dt:crate::model::GigaTime){ pub fn advance_time_ratio_dt(&mut self,dt:crate::model_physics::GigaTime){
self.position=self.extrapolated_position_ratio_dt(dt); self.position=self.extrapolated_position_ratio_dt(dt);
self.velocity=self.extrapolated_velocity_ratio_dt(dt); self.velocity=self.extrapolated_velocity_ratio_dt(dt);
self.time+=dt.into(); self.time+=dt.into();
@ -145,6 +137,14 @@ pub struct VirtualBody<'a,T>{
impl<T> VirtualBody<'_,T> impl<T> VirtualBody<'_,T>
where Time<T>:Copy, where Time<T>:Copy,
{ {
pub const fn relative<'a>(body0:&'a Body<T>,body1:&'a Body<T>)->VirtualBody<'a,T>{
//(p0,v0,a0,t0)
//(p1,v1,a1,t1)
VirtualBody{
body0,
body1,
}
}
pub fn extrapolated_position(&self,time:Time<T>)->Planar64Vec3{ pub fn extrapolated_position(&self,time:Time<T>)->Planar64Vec3{
self.body1.extrapolated_position(time)-self.body0.extrapolated_position(time) self.body1.extrapolated_position(time)-self.body0.extrapolated_position(time)
} }

Some files were not shown because too many files have changed in this diff