Compare commits

1 Commit

11 changed files with 651 additions and 698 deletions

.gitignore (vendored)

@@ -1 +1,2 @@
/target
Cargo.lock
target/

Cargo.toml

@@ -1,23 +1,24 @@
[package]
name = "strafe-client"
version = "0.2.0"
version = "0.2.1"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-executor = "1.5.1"
bytemuck = { version = "1.13.1", features = ["derive"] }
bytemuck = { version = "1.14.0", features = ["derive"] }
ddsfile = "0.5.1"
env_logger = "0.10.0"
glam = "0.24.1"
log = "0.4.20"
logga = "0.1.2"
obj = "0.10.2"
pollster = "0.3.0"
wgpu = "0.17.0"
winit = "0.28.6"
[profile.release]
lto = true
strip = true
codegen-units = 1
lto = true
opt-level = "z"
panic = "abort"
strip = true
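The added `[profile.release]` settings make this a size-focused release build: `codegen-units = 1` plus `lto = true` trade longer compile times for better cross-crate optimization, `opt-level = "z"` optimizes for binary size rather than speed, `panic = "abort"` drops the unwinding machinery, and `strip = true` removes symbols from the produced executable.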

README.md

@@ -7,4 +7,4 @@ In development client for jumping on squares (and riding on triangles)
1. Have rust and git installed
2. `git clone https://git.itzana.me/StrafesNET/strafe-client`
3. `cd strafe-client`
4. `cargo run --release`
4. `cargo run --release`

src/body.rs

@@ -1,120 +0,0 @@
pub struct Body {
pub position: glam::Vec3,//I64 where 2^32 = 1 u
pub velocity: glam::Vec3,//I64 where 2^32 = 1 u/s
pub time: TIME,//nanoseconds x xxxxD!
}
pub struct PhysicsState {
pub body: Body,
//pub contacts: Vec<RelativeCollision>,
pub time: TIME,
pub strafe_tick_rate: TIME,
pub tick: u32,
pub mv: f32,
pub walkspeed: f32,
pub friction: f32,
pub gravity: glam::Vec3,
pub grounded: bool,
pub jump_trying: bool,
}
pub type TIME = i64;
const CONTROL_JUMP:u32 = 0b01000000;//temp
impl PhysicsState {
//delete this, we are tickless gamers
pub fn run(&mut self, time: TIME, control_dir: glam::Vec3, controls: u32){
let target_tick = (time/10_000_000) as u32;//100t
//the game code can run for 1 month before running out of ticks
while self.tick<target_tick {
self.tick += 1;
let dt=0.01;
let d=self.body.velocity.dot(control_dir);
if d<self.mv {
self.body.velocity+=(self.mv-d)*control_dir;
}
self.body.velocity+=self.gravity*dt;
self.body.position+=self.body.velocity*dt;
if self.body.position.y<0.0{
self.body.position.y=0.0;
self.body.velocity.y=0.0;
self.grounded=true;
}
if self.grounded&&(controls&CONTROL_JUMP)!=0 {
self.grounded=false;
self.body.velocity+=glam::Vec3::new(0.0,0.715588/2.0*100.0,0.0);
}
if self.grounded {
let applied_friction=self.friction*dt;
let targetv=control_dir*self.walkspeed;
let diffv=targetv-self.body.velocity;
if applied_friction*applied_friction<diffv.length_squared() {
self.body.velocity+=applied_friction*diffv.normalize();
} else {
self.body.velocity=targetv;
}
}
}
self.body.time=target_tick as TIME*10_000_000;
}
//delete this
pub fn extrapolate_position(&self, time: TIME) -> glam::Vec3 {
let dt=(time-self.body.time) as f64/1_000_000_000f64;
self.body.position+self.body.velocity*(dt as f32)+self.gravity*((0.5*dt*dt) as f32)
}
fn next_strafe_event(&self) -> Option<crate::event::EventStruct> {
return Some(crate::event::EventStruct{
time:self.time/self.strafe_tick_rate*self.strafe_tick_rate,//this is floor(n) I need ceil(n)+1
event:crate::event::EventEnum::StrafeTick
});
}
}
impl crate::event::EventTrait for PhysicsState {
//this little next event function can cache its return value and invalidate the cached value by watching the State.
fn next_event(&self) -> Option<crate::event::EventStruct> {
//JUST POLLING!!! NO MUTATION
let mut best_event: Option<crate::event::EventStruct> = None;
let collect_event = |test_event:Option<crate::event::EventStruct>|{
match test_event {
Some(unwrap_test_event) => match best_event {
Some(unwrap_best_event) => if unwrap_test_event.time<unwrap_best_event.time {
best_event=test_event;
},
None => best_event=test_event,
},
None => (),
}
};
//check to see if yee need to jump (this is not the way lol)
if self.grounded&&self.jump_trying {
//scroll will be implemented with InputEvent::InstantJump rather than InputEvent::Jump(true)
collect_event(Some(crate::event::EventStruct{
time:self.time,
event:crate::event::EventEnum::Jump
}));
}
//check for collision stop events with current contacts
for collision_data in self.contacts.iter() {
collect_event(self.model.predict_collision(collision_data.model));
}
//check for collision start events (against every part in the game with no optimization!!)
for &model in self.world.models {
collect_event(self.model.predict_collision(&model));
}
//check to see when the next strafe tick is
collect_event(self.next_strafe_event());
best_event
}
}
//something that implements body + hitbox can predict collision
impl crate::sweep::PredictCollision for Model {
fn predict_collision(&self,other:&Model) -> Option<crate::event::EventStruct> {
//math!
None
}
}
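For orientation, the core of the deleted `PhysicsState::run` is the usual strafe-acceleration rule: speed is only added along the wish direction until the projection of the velocity onto it reaches `mv`. A minimal standalone sketch of that step (the function name is illustrative and not part of this diff; assumes `glam`):

```rust
use glam::Vec3;

/// One strafe tick as body.rs implemented it: project velocity onto the
/// wish direction and top it up to `mv` units of speed along that axis.
fn strafe_tick(velocity: Vec3, control_dir: Vec3, mv: f32) -> Vec3 {
    let d = velocity.dot(control_dir);
    if d < mv {
        velocity + (mv - d) * control_dir
    } else {
        velocity
    }
}
```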

src/event.rs

@@ -1,15 +0,0 @@
pub struct EventStruct {
pub time: crate::body::TIME,
pub event: EventEnum,
}
pub enum EventEnum {
CollisionStart,//(Collideable),//Body::CollisionStart
CollisionEnd,//(Collideable),//Body::CollisionEnd
StrafeTick,
Jump,
}
pub trait EventTrait {
fn next_event(&self) -> Option<EventStruct>;
}
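For context only, a consumer of the trait this file defined might look roughly like the sketch below; the dispatcher shown here is hypothetical, and `PhysicsState` in body.rs was the real implementor.

```rust
// Illustrative only: dispatch on the next pending event. `next_event`
// polls without mutating, so the caller is responsible for advancing state.
fn handle_next<T: EventTrait>(state: &T) {
    if let Some(ev) = state.next_event() {
        match ev.event {
            EventEnum::StrafeTick => { /* advance the body to ev.time, apply one strafe tick */ }
            EventEnum::Jump => { /* apply the jump impulse at ev.time */ }
            EventEnum::CollisionStart | EventEnum::CollisionEnd => { /* update the contact set */ }
        }
    }
}
```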

src/framework.rs

@@ -1,516 +0,0 @@
use std::future::Future;
#[cfg(target_arch = "wasm32")]
use std::str::FromStr;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
#[cfg(target_arch = "wasm32")]
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, data.len() * size_of::<T>()) }
}
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn move_mouse(&mut self, delta: (f64,f64));
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
spawner: &Spawner,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::DeviceEvent {
event:
winit::event::DeviceEvent::MouseMotion {
delta,
},
..
} => {
example.move_mouse(delta);
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if !is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
#[allow(dead_code)]
fn main() {}

src/lib.rs

@@ -1,3 +1,414 @@
pub mod framework;
pub mod body;
pub mod event;
#[cfg(not(target_arch = "wasm32"))]
use std::time::Instant;
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
use logga;
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
use std::{mem::size_of, slice::from_raw_parts};
unsafe { from_raw_parts(data.as_ptr() as *const u8, data.len() * size_of::<T>()) }
}
#[allow(dead_code)]
pub enum ShaderStage {
Vertex,
Fragment,
Compute,
}
pub trait Example: 'static + Sized {
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn move_mouse(&mut self, delta: (f64,f64));
fn render(
&mut self,
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
}
struct Setup {
window: winit::window::Window,
event_loop: EventLoop<()>,
instance: wgpu::Instance,
size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
//logga::init();
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
logga::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
logga::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
logga::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
logga::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
logga::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
logga::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
logga::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::DeviceEvent {
event:
winit::event::DeviceEvent::MouseMotion {
delta,
},
..
} => {
example.move_mouse(delta);
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
logga::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
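Taken together, the new crate root exposes `Example` and `run` directly instead of going through the old `framework` module. A minimal consumer would look roughly like the following sketch; `Empty` is a hypothetical placeholder (the real implementor is `Skybox` in main.rs, which also uses a `#[no_mangle]` main rather than the ordinary one shown here):

```rust
struct Empty;

impl strafe_client::Example for Empty {
    fn init(
        _config: &wgpu::SurfaceConfiguration,
        _adapter: &wgpu::Adapter,
        _device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) -> Self {
        Empty
    }
    fn resize(&mut self, _: &wgpu::SurfaceConfiguration, _: &wgpu::Device, _: &wgpu::Queue) {}
    fn update(&mut self, _: winit::event::WindowEvent) {}
    fn move_mouse(&mut self, _: (f64, f64)) {}
    fn render(&mut self, _: &wgpu::TextureView, _: &wgpu::Device, _: &wgpu::Queue) {
        // A real client would encode and submit a render pass here.
    }
}

fn main() {
    // run() drives the winit event loop and does not return on native targets.
    strafe_client::run::<Empty>("empty strafe client");
}
```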

src/main.rs

@@ -1,3 +1,163 @@
//#![no_std]
#![no_main]
// I'm trying to replace the stdlib functions so don't delete this yet
/*
extern crate std;
use std::*;
pub struct Vec<T> {
data: [T; 16],
len: usize,
}
impl<T> Vec<T> {
pub fn new() -> Self {
Vec {
data: Default::default(),
len: 0,
}
}
pub fn push(&mut self, value: T) -> Result<(), &'static str> {
if self.len < self.data.len() {
self.data[self.len] = value;
self.len += 1;
Ok(())
} else {
Err("Vec is full")
}
}
pub fn pop(&mut self) -> Option<T> {
if self.len > 0 {
self.len -= 1;
Some(std::mem::replace(&mut self.data[self.len], Default::default()))
} else {
None
}
}
pub fn len(&self) -> usize {
self.len
}
pub fn capacity(&self) -> usize {
self.data.len()
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
}
pub struct Iter<'a, T> {
vec: &'a Vec<T>,
index: usize,
}
impl<'a, T> Vec<T> {
// ... previous methods ...
pub fn iter(&'a self) -> Iter<'a, T> {
Iter {
vec: self,
index: 0,
}
}
}
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.vec.len {
let item = &self.vec.data[self.index];
self.index += 1;
Some(item)
} else {
None
}
}
}
impl<T> Vec<T> {
// (Previous methods omitted for brevity)
pub fn drain(&mut self, range: std::ops::Range<usize>) -> Drain<T> {
Drain {
vec: self,
range,
}
}
}
pub struct Drain<'a, T> {
vec: &'a mut Vec<T>,
range: std::ops::Range<usize>,
}
impl<'a, T> Iterator for Drain<'a, T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if let Some(index) = self.range.next_back() {
if index < self.vec.len {
// Swap and remove the element from the Vec
let last_index = self.vec.len - 1;
self.vec.len -= 1;
Some(std::mem::replace(&mut self.vec.data[index], Default::default()))
} else {
None
}
} else {
None
}
}
}
*/
//extern crate logga;
//extern crate libc;
/*
#[panic_handler]
fn my_panic(_info: &core::panic::PanicInfo) -> ! {
loop {}
}
*/
//use logga;
use bytemuck::{Pod, Zeroable};
use std::{borrow::Cow, time::Instant};
use wgpu::{util::DeviceExt, AstcBlock, AstcChannel};
@@ -34,12 +194,20 @@ struct Model {
// Note: we use the Y=up coordinate space in this example.
struct Camera {
time: Instant,
pos: glam::Vec3,
vel: glam::Vec3,
gravity: glam::Vec3,
friction: f32,
screen_size: (u32, u32),
offset: glam::Vec3,
fov: f32,
yaw: f32,
pitch: f32,
controls: u32,
mv: f32,
grounded: bool,
walkspeed: f32,
}
const CONTROL_MOVEFORWARD:u32 = 0b00000001;
@@ -92,7 +260,7 @@ fn get_control_dir(controls: u32) -> glam::Vec3{
}
impl Camera {
fn to_uniform_data(&self, pos: glam::Vec3) -> [f32; 16 * 3 + 4] {
fn to_uniform_data(&self) -> [f32; 16 * 3 + 4] {
let aspect = self.screen_size.0 as f32 / self.screen_size.1 as f32;
let fov = if self.controls&CONTROL_ZOOM==0 {
self.fov
@@ -101,7 +269,7 @@ impl Camera {
};
let proj = perspective_rh(fov, aspect, 0.5, 1000.0);
let proj_inv = proj.inverse();
let view = glam::Mat4::from_translation(pos+self.offset) * glam::Mat4::from_euler(glam::EulerRot::YXZ, self.yaw, self.pitch, 0f32);
let view = glam::Mat4::from_translation(self.pos+self.offset) * glam::Mat4::from_euler(glam::EulerRot::YXZ, self.yaw, self.pitch, 0f32);
let view_inv = view.inverse();
let mut raw = [0f32; 16 * 3 + 4];
@@ -114,9 +282,7 @@ impl Camera {
}
pub struct Skybox {
start_time: std::time::Instant,
camera: Camera,
physics: strafe_client::body::PhysicsState,
sky_pipeline: wgpu::RenderPipeline,
entity_pipeline: wgpu::RenderPipeline,
ground_pipeline: wgpu::RenderPipeline,
@@ -209,7 +375,7 @@ fn add_obj(device:&wgpu::Device,modeldatas:& mut Vec<ModelData>,source:&[u8]){
}
}
impl strafe_client::framework::Example for Skybox {
impl strafe_client::Example for Skybox {
fn optional_features() -> wgpu::Features {
wgpu::Features::TEXTURE_COMPRESSION_ASTC
| wgpu::Features::TEXTURE_COMPRESSION_ETC2
@@ -284,30 +450,22 @@ impl strafe_client::framework::Example for Skybox {
});
let camera = Camera {
time: Instant::now(),
pos: glam::Vec3::new(5.0,0.0,5.0),
vel: glam::Vec3::new(0.0,0.0,0.0),
gravity: glam::Vec3::new(0.0,-100.0,0.0),
friction: 90.0,
screen_size: (config.width, config.height),
offset: glam::Vec3::new(0.0,4.5,0.0),
fov: 1.0, //fov_slope = tan(fov_y/2)
pitch: 0.0,
yaw: 0.0,
controls:0,
};
let physics = strafe_client::body::PhysicsState {
body: strafe_client::body::Body {
position: glam::Vec3::new(5.0,0.0,5.0),
velocity: glam::Vec3::new(0.0,0.0,0.0),
time: 0,
},
time: 0,
tick: 0,
tick_rate: 100,
gravity: glam::Vec3::new(0.0,-100.0,0.0),
friction: 90.0,
mv: 2.7,
controls:0,
grounded: true,
walkspeed: 18.0,
};
let camera_uniforms = camera.to_uniform_data(physics.extrapolate_position(0));
let camera_uniforms = camera.to_uniform_data();
let camera_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Camera"),
contents: bytemuck::cast_slice(&camera_uniforms),
@@ -421,19 +579,19 @@ impl strafe_client::framework::Example for Skybox {
let device_features = device.features();
let skybox_format = if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC) {
log::info!("Using ASTC");
logga::info!("Using ASTC");
wgpu::TextureFormat::Astc {
block: AstcBlock::B4x4,
channel: AstcChannel::UnormSrgb,
}
} else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2) {
log::info!("Using ETC2");
logga::info!("Using ETC2");
wgpu::TextureFormat::Etc2Rgb8UnormSrgb
} else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC) {
log::info!("Using BC");
logga::info!("Using BC");
wgpu::TextureFormat::Bc1RgbaUnormSrgb
} else {
log::info!("Using plain");
logga::info!("Using plain");
wgpu::TextureFormat::Bgra8UnormSrgb
};
@@ -449,7 +607,7 @@ impl strafe_client::framework::Example for Skybox {
};
let max_mips = layer_size.max_mips(wgpu::TextureDimension::D2);
log::debug!(
logga::debug!(
"Copying {:?} skybox images of size {}, {}, 6 with {} mips to gpu",
skybox_format,
IMAGE_SIZE,
@@ -541,9 +699,7 @@ impl strafe_client::framework::Example for Skybox {
let depth_view = Self::create_depth_texture(config, device);
Skybox {
start_time: Instant::now(),
camera,
physics,
sky_pipeline,
entity_pipeline,
ground_pipeline,
@@ -627,20 +783,45 @@ impl strafe_client::framework::Example for Skybox {
view: &wgpu::TextureView,
device: &wgpu::Device,
queue: &wgpu::Queue,
_spawner: &strafe_client::framework::Spawner,
) {
let time = Instant::now();
//physique
let dt=(time-self.camera.time).as_secs_f32();
self.camera.time=time;
let camera_mat=glam::Mat3::from_euler(glam::EulerRot::YXZ,self.camera.yaw,0f32,0f32);
let control_dir=camera_mat*get_control_dir(self.camera.controls&(CONTROL_MOVELEFT|CONTROL_MOVERIGHT|CONTROL_MOVEFORWARD|CONTROL_MOVEBACK)).normalize_or_zero();
let time=self.start_time.elapsed().as_nanos() as i64;
self.physics.run(time,control_dir,self.camera.controls);
let d=self.camera.vel.dot(control_dir);
if d<self.camera.mv {
self.camera.vel+=(self.camera.mv-d)*control_dir;
}
self.camera.vel+=self.camera.gravity*dt;
self.camera.pos+=self.camera.vel*dt;
if self.camera.pos.y<0.0{
self.camera.pos.y=0.0;
self.camera.vel.y=0.0;
self.camera.grounded=true;
}
if self.camera.grounded&&(self.camera.controls&CONTROL_JUMP)!=0 {
self.camera.grounded=false;
self.camera.vel+=glam::Vec3::new(0.0,0.715588/2.0*100.0,0.0);
}
if self.camera.grounded {
let applied_friction=self.camera.friction*dt;
let targetv=control_dir*self.camera.walkspeed;
let diffv=targetv-self.camera.vel;
if applied_friction*applied_friction<diffv.length_squared() {
self.camera.vel+=applied_friction*diffv.normalize();
} else {
self.camera.vel=targetv;
}
}
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
// update rotation
let camera_uniforms = self.camera.to_uniform_data(self.physics.extrapolate_position(time));
let camera_uniforms = self.camera.to_uniform_data();
self.staging_belt
.write_buffer(
&mut encoder,
@@ -719,10 +900,12 @@ impl strafe_client::framework::Example for Skybox {
}
}
fn main() {
strafe_client::framework::run::<Skybox>(
#[no_mangle]
fn main(_argc: isize, _argv: *const *const u8) {
strafe_client::run::<Skybox>(
format!("Strafe Client v{}",
env!("CARGO_PKG_VERSION")
).as_str()
);
}
}
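One subtlety in the movement code now inlined in `render` is the grounded branch: the per-frame velocity change toward `control_dir * walkspeed` is capped at `friction * dt`, and the velocity snaps to the target once it is within that cap, so walking converges without oscillating around the target speed. As a standalone sketch (the function name is illustrative, not part of the diff; assumes `glam`):

```rust
use glam::Vec3;

/// Grounded movement step: move velocity toward the walk target, but never
/// by more than `friction * dt` in one frame; snap to the target when close.
fn walk_step(vel: Vec3, control_dir: Vec3, walkspeed: f32, friction: f32, dt: f32) -> Vec3 {
    let applied_friction = friction * dt;
    let target = control_dir * walkspeed;
    let diff = target - vel;
    if applied_friction * applied_friction < diff.length_squared() {
        vel + applied_friction * diff.normalize()
    } else {
        target
    }
}
```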

std/lib.rs (new file)

@@ -0,0 +1,8 @@
#[export_macro]
macro_rules! insssfo {
($($arg:tt)*) => {
};
}
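Editorial note on the stub above: Rust has no `#[export_macro]` attribute; the attribute for exporting a `macro_rules!` macro is `#[macro_export]`, so this file will not compile as written once it is built. Assuming the intent is a no-op shim that swallows its arguments, a compiling version would be:

```rust
#[macro_export]
macro_rules! insssfo {
    // Accept any tokens and expand to nothing.
    ($($arg:tt)*) => {};
}
```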