diff --git a/src/framework.rs b/src/framework.rs index 3fe157c..d18609f 100644 --- a/src/framework.rs +++ b/src/framework.rs @@ -6,438 +6,438 @@ use std::time::Instant; #[cfg(target_arch = "wasm32")] use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas}; use winit::{ - event::{self, WindowEvent}, - event_loop::{ControlFlow, EventLoop}, + event::{self, WindowEvent}, + event_loop::{ControlFlow, EventLoop}, }; #[allow(dead_code)] pub fn cast_slice(data: &[T]) -> &[u8] { - use std::{mem::size_of, slice::from_raw_parts}; + use std::{mem::size_of, slice::from_raw_parts}; - unsafe { from_raw_parts(data.as_ptr() as *const u8, data.len() * size_of::()) } + unsafe { from_raw_parts(data.as_ptr() as *const u8, data.len() * size_of::()) } } #[allow(dead_code)] pub enum ShaderStage { - Vertex, - Fragment, - Compute, + Vertex, + Fragment, + Compute, } pub trait Example: 'static + Sized { - fn optional_features() -> wgpu::Features { - wgpu::Features::empty() - } - fn required_features() -> wgpu::Features { - wgpu::Features::empty() - } - fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities { - wgpu::DownlevelCapabilities { - flags: wgpu::DownlevelFlags::empty(), - shader_model: wgpu::ShaderModel::Sm5, - ..wgpu::DownlevelCapabilities::default() - } - } - fn required_limits() -> wgpu::Limits { - wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware - } - fn init( - config: &wgpu::SurfaceConfiguration, - adapter: &wgpu::Adapter, - device: &wgpu::Device, - queue: &wgpu::Queue, - ) -> Self; - fn resize( - &mut self, - config: &wgpu::SurfaceConfiguration, - device: &wgpu::Device, - queue: &wgpu::Queue, - ); - fn update(&mut self, event: WindowEvent); - fn move_mouse(&mut self, delta: (f64,f64)); - fn render( - &mut self, - view: &wgpu::TextureView, - device: &wgpu::Device, - queue: &wgpu::Queue, - spawner: &Spawner, - ); + fn optional_features() -> wgpu::Features { + wgpu::Features::empty() + } + fn required_features() -> wgpu::Features { + wgpu::Features::empty() + } + fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities { + wgpu::DownlevelCapabilities { + flags: wgpu::DownlevelFlags::empty(), + shader_model: wgpu::ShaderModel::Sm5, + ..wgpu::DownlevelCapabilities::default() + } + } + fn required_limits() -> wgpu::Limits { + wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware + } + fn init( + config: &wgpu::SurfaceConfiguration, + adapter: &wgpu::Adapter, + device: &wgpu::Device, + queue: &wgpu::Queue, + ) -> Self; + fn resize( + &mut self, + config: &wgpu::SurfaceConfiguration, + device: &wgpu::Device, + queue: &wgpu::Queue, + ); + fn update(&mut self, event: WindowEvent); + fn move_mouse(&mut self, delta: (f64,f64)); + fn render( + &mut self, + view: &wgpu::TextureView, + device: &wgpu::Device, + queue: &wgpu::Queue, + spawner: &Spawner, + ); } struct Setup { - window: winit::window::Window, - event_loop: EventLoop<()>, - instance: wgpu::Instance, - size: winit::dpi::PhysicalSize, - surface: wgpu::Surface, - adapter: wgpu::Adapter, - device: wgpu::Device, - queue: wgpu::Queue, - #[cfg(target_arch = "wasm32")] - offscreen_canvas_setup: Option, + window: winit::window::Window, + event_loop: EventLoop<()>, + instance: wgpu::Instance, + size: winit::dpi::PhysicalSize, + surface: wgpu::Surface, + adapter: wgpu::Adapter, + device: wgpu::Device, + queue: wgpu::Queue, + #[cfg(target_arch = "wasm32")] + offscreen_canvas_setup: Option, } 
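// Illustrative sketch, not part of this diff: how a helper like the framework's
// generic cast_slice::<T> is typically used to hand a typed slice to wgpu as raw
// bytes. The Vert struct and the demo_bytes / demo_usage functions below are
// hypothetical stand-ins, not code from this repository. The requirement carried
// over from the real helper is that the element type is plain-old-data
// (repr(C), Copy, no padding), otherwise the unsafe reinterpretation is unsound.
// Note that main.rs in this same diff uses bytemuck::cast_slice, which enforces
// that requirement at compile time through its Pod bound.
#[repr(C)]
#[derive(Clone, Copy)]
struct Vert {
    pos: [f32; 3],
    uv: [f32; 2],
}

fn demo_bytes(verts: &[Vert]) -> &[u8] {
    use std::{mem::size_of, slice::from_raw_parts};
    // Same pattern as the framework helper: the byte length is the element
    // count times the element size.
    unsafe { from_raw_parts(verts.as_ptr() as *const u8, verts.len() * size_of::<Vert>()) }
}

fn demo_usage() {
    let tri = [Vert { pos: [0.0; 3], uv: [0.0; 2] }; 3];
    // Three vertices reinterpreted as bytes for a hypothetical buffer upload.
    assert_eq!(demo_bytes(&tri).len(), 3 * std::mem::size_of::<Vert>());
}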
#[cfg(target_arch = "wasm32")] struct OffscreenCanvasSetup { - offscreen_canvas: OffscreenCanvas, - bitmap_renderer: ImageBitmapRenderingContext, + offscreen_canvas: OffscreenCanvas, + bitmap_renderer: ImageBitmapRenderingContext, } async fn setup(title: &str) -> Setup { - #[cfg(not(target_arch = "wasm32"))] - { - env_logger::init(); - }; + #[cfg(not(target_arch = "wasm32"))] + { + env_logger::init(); + }; - let event_loop = EventLoop::new(); - let mut builder = winit::window::WindowBuilder::new(); - builder = builder.with_title(title); - #[cfg(windows_OFF)] // TODO - { - use winit::platform::windows::WindowBuilderExtWindows; - builder = builder.with_no_redirection_bitmap(true); - } - let window = builder.build(&event_loop).unwrap(); + let event_loop = EventLoop::new(); + let mut builder = winit::window::WindowBuilder::new(); + builder = builder.with_title(title); + #[cfg(windows_OFF)] // TODO + { + use winit::platform::windows::WindowBuilderExtWindows; + builder = builder.with_no_redirection_bitmap(true); + } + let window = builder.build(&event_loop).unwrap(); - #[cfg(target_arch = "wasm32")] - { - use winit::platform::web::WindowExtWebSys; - let query_string = web_sys::window().unwrap().location().search().unwrap(); - let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG") - .and_then(|x| x.parse().ok()) - .unwrap_or(log::Level::Error); - console_log::init_with_level(level).expect("could not initialize logger"); - std::panic::set_hook(Box::new(console_error_panic_hook::hook)); - // On wasm, append the canvas to the document body - web_sys::window() - .and_then(|win| win.document()) - .and_then(|doc| doc.body()) - .and_then(|body| { - body.append_child(&web_sys::Element::from(window.canvas())) - .ok() - }) - .expect("couldn't append canvas to document body"); - } + #[cfg(target_arch = "wasm32")] + { + use winit::platform::web::WindowExtWebSys; + let query_string = web_sys::window().unwrap().location().search().unwrap(); + let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG") + .and_then(|x| x.parse().ok()) + .unwrap_or(log::Level::Error); + console_log::init_with_level(level).expect("could not initialize logger"); + std::panic::set_hook(Box::new(console_error_panic_hook::hook)); + // On wasm, append the canvas to the document body + web_sys::window() + .and_then(|win| win.document()) + .and_then(|doc| doc.body()) + .and_then(|body| { + body.append_child(&web_sys::Element::from(window.canvas())) + .ok() + }) + .expect("couldn't append canvas to document body"); + } - #[cfg(target_arch = "wasm32")] - let mut offscreen_canvas_setup: Option = None; - #[cfg(target_arch = "wasm32")] - { - use wasm_bindgen::JsCast; - use winit::platform::web::WindowExtWebSys; + #[cfg(target_arch = "wasm32")] + let mut offscreen_canvas_setup: Option = None; + #[cfg(target_arch = "wasm32")] + { + use wasm_bindgen::JsCast; + use winit::platform::web::WindowExtWebSys; - let query_string = web_sys::window().unwrap().location().search().unwrap(); - if let Some(offscreen_canvas_param) = - parse_url_query_string(&query_string, "offscreen_canvas") - { - if FromStr::from_str(offscreen_canvas_param) == Ok(true) { - log::info!("Creating OffscreenCanvasSetup"); + let query_string = web_sys::window().unwrap().location().search().unwrap(); + if let Some(offscreen_canvas_param) = + parse_url_query_string(&query_string, "offscreen_canvas") + { + if FromStr::from_str(offscreen_canvas_param) == Ok(true) { + log::info!("Creating OffscreenCanvasSetup"); - let offscreen_canvas = - 
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas"); + let offscreen_canvas = + OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas"); - let bitmap_renderer = window - .canvas() - .get_context("bitmaprenderer") - .expect("couldn't create ImageBitmapRenderingContext (Result)") - .expect("couldn't create ImageBitmapRenderingContext (Option)") - .dyn_into::() - .expect("couldn't convert into ImageBitmapRenderingContext"); + let bitmap_renderer = window + .canvas() + .get_context("bitmaprenderer") + .expect("couldn't create ImageBitmapRenderingContext (Result)") + .expect("couldn't create ImageBitmapRenderingContext (Option)") + .dyn_into::() + .expect("couldn't convert into ImageBitmapRenderingContext"); - offscreen_canvas_setup = Some(OffscreenCanvasSetup { - offscreen_canvas, - bitmap_renderer, - }) - } - } - }; + offscreen_canvas_setup = Some(OffscreenCanvasSetup { + offscreen_canvas, + bitmap_renderer, + }) + } + } + }; - log::info!("Initializing the surface..."); + log::info!("Initializing the surface..."); - let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all); - let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(); + let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all); + let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(); - let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { - backends, - dx12_shader_compiler, - }); - let (size, surface) = unsafe { - let size = window.inner_size(); + let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { + backends, + dx12_shader_compiler, + }); + let (size, surface) = unsafe { + let size = window.inner_size(); - #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] - let surface = instance.create_surface(&window).unwrap(); - #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] - let surface = { - if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup { - log::info!("Creating surface from OffscreenCanvas"); - instance.create_surface_from_offscreen_canvas( - offscreen_canvas_setup.offscreen_canvas.clone(), - ) - } else { - instance.create_surface(&window) - } - } - .unwrap(); + #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] + let surface = instance.create_surface(&window).unwrap(); + #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] + let surface = { + if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup { + log::info!("Creating surface from OffscreenCanvas"); + instance.create_surface_from_offscreen_canvas( + offscreen_canvas_setup.offscreen_canvas.clone(), + ) + } else { + instance.create_surface(&window) + } + } + .unwrap(); - (size, surface) - }; - let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface)) - .await - .expect("No suitable GPU adapters found on the system!"); + (size, surface) + }; + let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface)) + .await + .expect("No suitable GPU adapters found on the system!"); - #[cfg(not(target_arch = "wasm32"))] - { - let adapter_info = adapter.get_info(); - println!("Using {} ({:?})", adapter_info.name, adapter_info.backend); - } + #[cfg(not(target_arch = "wasm32"))] + { + let adapter_info = adapter.get_info(); + println!("Using {} ({:?})", adapter_info.name, adapter_info.backend); + } - let optional_features = E::optional_features(); - let 
required_features = E::required_features(); - let adapter_features = adapter.features(); - assert!( - adapter_features.contains(required_features), - "Adapter does not support required features for this example: {:?}", - required_features - adapter_features - ); + let optional_features = E::optional_features(); + let required_features = E::required_features(); + let adapter_features = adapter.features(); + assert!( + adapter_features.contains(required_features), + "Adapter does not support required features for this example: {:?}", + required_features - adapter_features + ); - let required_downlevel_capabilities = E::required_downlevel_capabilities(); - let downlevel_capabilities = adapter.get_downlevel_capabilities(); - assert!( - downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model, - "Adapter does not support the minimum shader model required to run this example: {:?}", - required_downlevel_capabilities.shader_model - ); - assert!( - downlevel_capabilities - .flags - .contains(required_downlevel_capabilities.flags), - "Adapter does not support the downlevel capabilities required to run this example: {:?}", - required_downlevel_capabilities.flags - downlevel_capabilities.flags - ); + let required_downlevel_capabilities = E::required_downlevel_capabilities(); + let downlevel_capabilities = adapter.get_downlevel_capabilities(); + assert!( + downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model, + "Adapter does not support the minimum shader model required to run this example: {:?}", + required_downlevel_capabilities.shader_model + ); + assert!( + downlevel_capabilities + .flags + .contains(required_downlevel_capabilities.flags), + "Adapter does not support the downlevel capabilities required to run this example: {:?}", + required_downlevel_capabilities.flags - downlevel_capabilities.flags + ); - // Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface. - let needed_limits = E::required_limits().using_resolution(adapter.limits()); + // Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface. 
+ let needed_limits = E::required_limits().using_resolution(adapter.limits()); - let trace_dir = std::env::var("WGPU_TRACE"); - let (device, queue) = adapter - .request_device( - &wgpu::DeviceDescriptor { - label: None, - features: (optional_features & adapter_features) | required_features, - limits: needed_limits, - }, - trace_dir.ok().as_ref().map(std::path::Path::new), - ) - .await - .expect("Unable to find a suitable GPU adapter!"); + let trace_dir = std::env::var("WGPU_TRACE"); + let (device, queue) = adapter + .request_device( + &wgpu::DeviceDescriptor { + label: None, + features: (optional_features & adapter_features) | required_features, + limits: needed_limits, + }, + trace_dir.ok().as_ref().map(std::path::Path::new), + ) + .await + .expect("Unable to find a suitable GPU adapter!"); - Setup { - window, - event_loop, - instance, - size, - surface, - adapter, - device, - queue, - #[cfg(target_arch = "wasm32")] - offscreen_canvas_setup, - } + Setup { + window, + event_loop, + instance, + size, + surface, + adapter, + device, + queue, + #[cfg(target_arch = "wasm32")] + offscreen_canvas_setup, + } } fn start( - #[cfg(not(target_arch = "wasm32"))] Setup { - window, - event_loop, - instance, - size, - surface, - adapter, - device, - queue, - }: Setup, - #[cfg(target_arch = "wasm32")] Setup { - window, - event_loop, - instance, - size, - surface, - adapter, - device, - queue, - offscreen_canvas_setup, - }: Setup, + #[cfg(not(target_arch = "wasm32"))] Setup { + window, + event_loop, + instance, + size, + surface, + adapter, + device, + queue, + }: Setup, + #[cfg(target_arch = "wasm32")] Setup { + window, + event_loop, + instance, + size, + surface, + adapter, + device, + queue, + offscreen_canvas_setup, + }: Setup, ) { - let spawner = Spawner::new(); - let mut config = surface - .get_default_config(&adapter, size.width, size.height) - .expect("Surface isn't supported by the adapter."); - let surface_view_format = config.format.add_srgb_suffix(); - config.view_formats.push(surface_view_format); - surface.configure(&device, &config); + let spawner = Spawner::new(); + let mut config = surface + .get_default_config(&adapter, size.width, size.height) + .expect("Surface isn't supported by the adapter."); + let surface_view_format = config.format.add_srgb_suffix(); + config.view_formats.push(surface_view_format); + surface.configure(&device, &config); - log::info!("Initializing the example..."); - let mut example = E::init(&config, &adapter, &device, &queue); + log::info!("Initializing the example..."); + let mut example = E::init(&config, &adapter, &device, &queue); - #[cfg(not(target_arch = "wasm32"))] - let mut last_frame_inst = Instant::now(); - #[cfg(not(target_arch = "wasm32"))] - let (mut frame_count, mut accum_time) = (0, 0.0); + #[cfg(not(target_arch = "wasm32"))] + let mut last_frame_inst = Instant::now(); + #[cfg(not(target_arch = "wasm32"))] + let (mut frame_count, mut accum_time) = (0, 0.0); - log::info!("Entering render loop..."); - event_loop.run(move |event, _, control_flow| { - let _ = (&instance, &adapter); // force ownership by the closure - *control_flow = if cfg!(feature = "metal-auto-capture") { - ControlFlow::Exit - } else { - ControlFlow::Poll - }; - match event { - event::Event::RedrawEventsCleared => { - #[cfg(not(target_arch = "wasm32"))] - spawner.run_until_stalled(); + log::info!("Entering render loop..."); + event_loop.run(move |event, _, control_flow| { + let _ = (&instance, &adapter); // force ownership by the closure + *control_flow = if cfg!(feature = 
"metal-auto-capture") { + ControlFlow::Exit + } else { + ControlFlow::Poll + }; + match event { + event::Event::RedrawEventsCleared => { + #[cfg(not(target_arch = "wasm32"))] + spawner.run_until_stalled(); - window.request_redraw(); - } - event::Event::WindowEvent { - event: - WindowEvent::Resized(size) - | WindowEvent::ScaleFactorChanged { - new_inner_size: &mut size, - .. - }, - .. - } => { - // Once winit is fixed, the detection conditions here can be removed. - // https://github.com/rust-windowing/winit/issues/2876 - let max_dimension = adapter.limits().max_texture_dimension_2d; - if size.width > max_dimension || size.height > max_dimension { - log::warn!( - "The resizing size {:?} exceeds the limit of {}.", - size, - max_dimension - ); - } else { - log::info!("Resizing to {:?}", size); - config.width = size.width.max(1); - config.height = size.height.max(1); - example.resize(&config, &device, &queue); - surface.configure(&device, &config); - } - } - event::Event::WindowEvent { event, .. } => match event { - WindowEvent::KeyboardInput { - input: - event::KeyboardInput { - virtual_keycode: Some(event::VirtualKeyCode::Escape), - state: event::ElementState::Pressed, - .. - }, - .. - } - | WindowEvent::CloseRequested => { - *control_flow = ControlFlow::Exit; - } - #[cfg(not(target_arch = "wasm32"))] - WindowEvent::KeyboardInput { - input: - event::KeyboardInput { - virtual_keycode: Some(event::VirtualKeyCode::R), - state: event::ElementState::Pressed, - .. - }, - .. - } => { - println!("{:#?}", instance.generate_report()); - } - _ => { - example.update(event); - } - }, - event::Event::DeviceEvent { - event: - winit::event::DeviceEvent::MouseMotion { - delta, - }, - .. - } => { - example.move_mouse(delta); - }, - event::Event::RedrawRequested(_) => { - #[cfg(not(target_arch = "wasm32"))] - { - accum_time += last_frame_inst.elapsed().as_secs_f32(); - last_frame_inst = Instant::now(); - frame_count += 1; - if frame_count == 100 { - println!( - "Avg frame time {}ms", - accum_time * 1000.0 / frame_count as f32 - ); - accum_time = 0.0; - frame_count = 0; - } - } + window.request_redraw(); + } + event::Event::WindowEvent { + event: + WindowEvent::Resized(size) + | WindowEvent::ScaleFactorChanged { + new_inner_size: &mut size, + .. + }, + .. + } => { + // Once winit is fixed, the detection conditions here can be removed. + // https://github.com/rust-windowing/winit/issues/2876 + let max_dimension = adapter.limits().max_texture_dimension_2d; + if size.width > max_dimension || size.height > max_dimension { + log::warn!( + "The resizing size {:?} exceeds the limit of {}.", + size, + max_dimension + ); + } else { + log::info!("Resizing to {:?}", size); + config.width = size.width.max(1); + config.height = size.height.max(1); + example.resize(&config, &device, &queue); + surface.configure(&device, &config); + } + } + event::Event::WindowEvent { event, .. } => match event { + WindowEvent::KeyboardInput { + input: + event::KeyboardInput { + virtual_keycode: Some(event::VirtualKeyCode::Escape), + state: event::ElementState::Pressed, + .. + }, + .. + } + | WindowEvent::CloseRequested => { + *control_flow = ControlFlow::Exit; + } + #[cfg(not(target_arch = "wasm32"))] + WindowEvent::KeyboardInput { + input: + event::KeyboardInput { + virtual_keycode: Some(event::VirtualKeyCode::R), + state: event::ElementState::Pressed, + .. + }, + .. 
+ } => { + println!("{:#?}", instance.generate_report()); + } + _ => { + example.update(event); + } + }, + event::Event::DeviceEvent { + event: + winit::event::DeviceEvent::MouseMotion { + delta, + }, + .. + } => { + example.move_mouse(delta); + }, + event::Event::RedrawRequested(_) => { + #[cfg(not(target_arch = "wasm32"))] + { + accum_time += last_frame_inst.elapsed().as_secs_f32(); + last_frame_inst = Instant::now(); + frame_count += 1; + if frame_count == 100 { + println!( + "Avg frame time {}ms", + accum_time * 1000.0 / frame_count as f32 + ); + accum_time = 0.0; + frame_count = 0; + } + } - let frame = match surface.get_current_texture() { - Ok(frame) => frame, - Err(_) => { - surface.configure(&device, &config); - surface - .get_current_texture() - .expect("Failed to acquire next surface texture!") - } - }; - let view = frame.texture.create_view(&wgpu::TextureViewDescriptor { - format: Some(surface_view_format), - ..wgpu::TextureViewDescriptor::default() - }); + let frame = match surface.get_current_texture() { + Ok(frame) => frame, + Err(_) => { + surface.configure(&device, &config); + surface + .get_current_texture() + .expect("Failed to acquire next surface texture!") + } + }; + let view = frame.texture.create_view(&wgpu::TextureViewDescriptor { + format: Some(surface_view_format), + ..wgpu::TextureViewDescriptor::default() + }); - example.render(&view, &device, &queue, &spawner); + example.render(&view, &device, &queue, &spawner); - frame.present(); + frame.present(); - #[cfg(target_arch = "wasm32")] - { - if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup { - let image_bitmap = offscreen_canvas_setup - .offscreen_canvas - .transfer_to_image_bitmap() - .expect("couldn't transfer offscreen canvas to image bitmap."); - offscreen_canvas_setup - .bitmap_renderer - .transfer_from_image_bitmap(&image_bitmap); + #[cfg(target_arch = "wasm32")] + { + if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup { + let image_bitmap = offscreen_canvas_setup + .offscreen_canvas + .transfer_to_image_bitmap() + .expect("couldn't transfer offscreen canvas to image bitmap."); + offscreen_canvas_setup + .bitmap_renderer + .transfer_from_image_bitmap(&image_bitmap); - log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer"); - } - } - } - _ => {} - } - }); + log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer"); + } + } + } + _ => {} + } + }); } #[cfg(not(target_arch = "wasm32"))] pub struct Spawner<'a> { - executor: async_executor::LocalExecutor<'a>, + executor: async_executor::LocalExecutor<'a>, } #[cfg(not(target_arch = "wasm32"))] impl<'a> Spawner<'a> { - fn new() -> Self { - Self { - executor: async_executor::LocalExecutor::new(), - } - } + fn new() -> Self { + Self { + executor: async_executor::LocalExecutor::new(), + } + } - #[allow(dead_code)] - pub fn spawn_local(&self, future: impl Future + 'a) { - self.executor.spawn(future).detach(); - } + #[allow(dead_code)] + pub fn spawn_local(&self, future: impl Future + 'a) { + self.executor.spawn(future).detach(); + } - fn run_until_stalled(&self) { - while self.executor.try_tick() {} - } + fn run_until_stalled(&self) { + while self.executor.try_tick() {} + } } #[cfg(target_arch = "wasm32")] @@ -445,69 +445,69 @@ pub struct Spawner {} #[cfg(target_arch = "wasm32")] impl Spawner { - fn new() -> Self { - Self {} - } + fn new() -> Self { + Self {} + } - #[allow(dead_code)] - pub fn spawn_local(&self, future: impl Future + 'static) { - wasm_bindgen_futures::spawn_local(future); - } + #[allow(dead_code)] + pub fn 
spawn_local(&self, future: impl Future + 'static) { + wasm_bindgen_futures::spawn_local(future); + } } #[cfg(not(target_arch = "wasm32"))] pub fn run(title: &str) { - let setup = pollster::block_on(setup::(title)); - start::(setup); + let setup = pollster::block_on(setup::(title)); + start::(setup); } #[cfg(target_arch = "wasm32")] pub fn run(title: &str) { - use wasm_bindgen::prelude::*; + use wasm_bindgen::prelude::*; - let title = title.to_owned(); - wasm_bindgen_futures::spawn_local(async move { - let setup = setup::(&title).await; - let start_closure = Closure::once_into_js(move || start::(setup)); + let title = title.to_owned(); + wasm_bindgen_futures::spawn_local(async move { + let setup = setup::(&title).await; + let start_closure = Closure::once_into_js(move || start::(setup)); - // make sure to handle JS exceptions thrown inside start. - // Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again. - // This is required, because winit uses JS exception for control flow to escape from `run`. - if let Err(error) = call_catch(&start_closure) { - let is_control_flow_exception = error.dyn_ref::().map_or(false, |e| { - e.message().includes("Using exceptions for control flow", 0) - }); + // make sure to handle JS exceptions thrown inside start. + // Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again. + // This is required, because winit uses JS exception for control flow to escape from `run`. + if let Err(error) = call_catch(&start_closure) { + let is_control_flow_exception = error.dyn_ref::().map_or(false, |e| { + e.message().includes("Using exceptions for control flow", 0) + }); - if !is_control_flow_exception { - web_sys::console::error_1(&error); - } - } + if !is_control_flow_exception { + web_sys::console::error_1(&error); + } + } - #[wasm_bindgen] - extern "C" { - #[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")] - fn call_catch(this: &JsValue) -> Result<(), JsValue>; - } - }); + #[wasm_bindgen] + extern "C" { + #[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")] + fn call_catch(this: &JsValue) -> Result<(), JsValue>; + } + }); } #[cfg(target_arch = "wasm32")] /// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a /// specific key out of it. pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> { - let query_string = query.strip_prefix('?')?; + let query_string = query.strip_prefix('?')?; - for pair in query_string.split('&') { - let mut pair = pair.split('='); - let key = pair.next()?; - let value = pair.next()?; + for pair in query_string.split('&') { + let mut pair = pair.split('='); + let key = pair.next()?; + let value = pair.next()?; - if key == search_key { - return Some(value); - } - } + if key == search_key { + return Some(value); + } + } - None + None } // This allows treating the framework as a standalone example, diff --git a/src/main.rs b/src/main.rs index bd20b34..dce64d5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,47 +7,47 @@ const IMAGE_SIZE: u32 = 128; #[derive(Clone, Copy, Pod, Zeroable)] #[repr(C)] struct Vertex { - pos: [f32; 3], - texture: [f32; 2], - normal: [f32; 3], + pos: [f32; 3], + texture: [f32; 2], + normal: [f32; 3], } struct Entity { - index_count: u32, - index_buf: wgpu::Buffer, + index_count: u32, + index_buf: wgpu::Buffer, } //temp? 
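// Illustrative sketch, not part of this diff: how the Entity / Model split
// declared here and just below is typically consumed when recording a render
// pass. One vertex buffer and one per-model bind group are bound per Model,
// then each Entity issues its own indexed draw from its index buffer. The
// draw_models function, the bind-group slot 1 (matching the pipeline layout
// order main layout then model layout later in this diff), and the u16 index
// format (matching indices.push(i as u16) in add_obj) are assumptions for
// illustration, not code taken from the patch.
fn draw_models<'a>(rpass: &mut wgpu::RenderPass<'a>, models: &'a [Model]) {
    for model in models {
        // Per-model transform uniform lives behind the model bind group.
        rpass.set_bind_group(1, &model.bind_group, &[]);
        rpass.set_vertex_buffer(0, model.vertex_buf.slice(..));
        for entity in &model.entities {
            rpass.set_index_buffer(entity.index_buf.slice(..), wgpu::IndexFormat::Uint16);
            rpass.draw_indexed(0..entity.index_count, 0, 0..1);
        }
    }
}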
struct ModelData { - transform: glam::Affine3A, - vertex_buf: wgpu::Buffer, - entities: Vec, + transform: glam::Affine3A, + vertex_buf: wgpu::Buffer, + entities: Vec, } struct Model { - transform: glam::Affine3A, - vertex_buf: wgpu::Buffer, - entities: Vec, - bind_group: wgpu::BindGroup, - model_buf: wgpu::Buffer, + transform: glam::Affine3A, + vertex_buf: wgpu::Buffer, + entities: Vec, + bind_group: wgpu::BindGroup, + model_buf: wgpu::Buffer, } // Note: we use the Y=up coordinate space in this example. struct Camera { - time: Instant, - pos: glam::Vec3, - vel: glam::Vec3, - gravity: glam::Vec3, - friction: f32, - screen_size: (u32, u32), - offset: glam::Vec3, - fov: f32, - yaw: f32, - pitch: f32, - controls: u32, - mv: f32, - grounded: bool, - walkspeed: f32, + time: Instant, + pos: glam::Vec3, + vel: glam::Vec3, + gravity: glam::Vec3, + friction: f32, + screen_size: (u32, u32), + offset: glam::Vec3, + fov: f32, + yaw: f32, + pitch: f32, + controls: u32, + mv: f32, + grounded: bool, + walkspeed: f32, } const CONTROL_MOVEFORWARD:u32 = 0b00000001; @@ -64,687 +64,687 @@ const RIGHT_DIR:glam::Vec3 = glam::Vec3::new(1.0,0.0,0.0); const UP_DIR:glam::Vec3 = glam::Vec3::new(0.0,1.0,0.0); fn get_control_dir(controls: u32) -> glam::Vec3{ - //don't get fancy just do it - let mut control_dir:glam::Vec3 = glam::Vec3::new(0.0,0.0,0.0); - if controls & CONTROL_MOVEFORWARD == CONTROL_MOVEFORWARD { - control_dir+=FORWARD_DIR; - } - if controls & CONTROL_MOVEBACK == CONTROL_MOVEBACK { - control_dir+=-FORWARD_DIR; - } - if controls & CONTROL_MOVELEFT == CONTROL_MOVELEFT { - control_dir+=-RIGHT_DIR; - } - if controls & CONTROL_MOVERIGHT == CONTROL_MOVERIGHT { - control_dir+=RIGHT_DIR; - } - if controls & CONTROL_MOVEUP == CONTROL_MOVEUP { - control_dir+=UP_DIR; - } - if controls & CONTROL_MOVEDOWN == CONTROL_MOVEDOWN { - control_dir+=-UP_DIR; - } - return control_dir + //don't get fancy just do it + let mut control_dir:glam::Vec3 = glam::Vec3::new(0.0,0.0,0.0); + if controls & CONTROL_MOVEFORWARD == CONTROL_MOVEFORWARD { + control_dir+=FORWARD_DIR; + } + if controls & CONTROL_MOVEBACK == CONTROL_MOVEBACK { + control_dir+=-FORWARD_DIR; + } + if controls & CONTROL_MOVELEFT == CONTROL_MOVELEFT { + control_dir+=-RIGHT_DIR; + } + if controls & CONTROL_MOVERIGHT == CONTROL_MOVERIGHT { + control_dir+=RIGHT_DIR; + } + if controls & CONTROL_MOVEUP == CONTROL_MOVEUP { + control_dir+=UP_DIR; + } + if controls & CONTROL_MOVEDOWN == CONTROL_MOVEDOWN { + control_dir+=-UP_DIR; + } + return control_dir } - #[inline] - fn perspective_rh(fov_y_slope: f32, aspect_ratio: f32, z_near: f32, z_far: f32) -> glam::Mat4 { - //glam_assert!(z_near > 0.0 && z_far > 0.0); - let r = z_far / (z_near - z_far); - glam::Mat4::from_cols( - glam::Vec4::new(1.0/(fov_y_slope * aspect_ratio), 0.0, 0.0, 0.0), - glam::Vec4::new(0.0, 1.0/fov_y_slope, 0.0, 0.0), - glam::Vec4::new(0.0, 0.0, r, -1.0), - glam::Vec4::new(0.0, 0.0, r * z_near, 0.0), - ) - } + #[inline] + fn perspective_rh(fov_y_slope: f32, aspect_ratio: f32, z_near: f32, z_far: f32) -> glam::Mat4 { + //glam_assert!(z_near > 0.0 && z_far > 0.0); + let r = z_far / (z_near - z_far); + glam::Mat4::from_cols( + glam::Vec4::new(1.0/(fov_y_slope * aspect_ratio), 0.0, 0.0, 0.0), + glam::Vec4::new(0.0, 1.0/fov_y_slope, 0.0, 0.0), + glam::Vec4::new(0.0, 0.0, r, -1.0), + glam::Vec4::new(0.0, 0.0, r * z_near, 0.0), + ) + } impl Camera { - fn to_uniform_data(&self) -> [f32; 16 * 3 + 4] { - let aspect = self.screen_size.0 as f32 / self.screen_size.1 as f32; - let fov = if self.controls&CONTROL_ZOOM==0 { 
- self.fov - }else{ - self.fov/5.0 - }; - let proj = perspective_rh(fov, aspect, 0.5, 1000.0); - let proj_inv = proj.inverse(); - let view = glam::Mat4::from_translation(self.pos+self.offset) * glam::Mat4::from_euler(glam::EulerRot::YXZ, self.yaw, self.pitch, 0f32); - let view_inv = view.inverse(); + fn to_uniform_data(&self) -> [f32; 16 * 3 + 4] { + let aspect = self.screen_size.0 as f32 / self.screen_size.1 as f32; + let fov = if self.controls&CONTROL_ZOOM==0 { + self.fov + }else{ + self.fov/5.0 + }; + let proj = perspective_rh(fov, aspect, 0.5, 1000.0); + let proj_inv = proj.inverse(); + let view = glam::Mat4::from_translation(self.pos+self.offset) * glam::Mat4::from_euler(glam::EulerRot::YXZ, self.yaw, self.pitch, 0f32); + let view_inv = view.inverse(); - let mut raw = [0f32; 16 * 3 + 4]; - raw[..16].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj)[..]); - raw[16..32].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj_inv)[..]); - raw[32..48].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view_inv)[..]); - raw[48..52].copy_from_slice(AsRef::<[f32; 4]>::as_ref(&view.col(3))); - raw - } + let mut raw = [0f32; 16 * 3 + 4]; + raw[..16].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj)[..]); + raw[16..32].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj_inv)[..]); + raw[32..48].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view_inv)[..]); + raw[48..52].copy_from_slice(AsRef::<[f32; 4]>::as_ref(&view.col(3))); + raw + } } pub struct Skybox { - camera: Camera, - sky_pipeline: wgpu::RenderPipeline, - entity_pipeline: wgpu::RenderPipeline, - ground_pipeline: wgpu::RenderPipeline, - main_bind_group: wgpu::BindGroup, - camera_buf: wgpu::Buffer, - models: Vec, - depth_view: wgpu::TextureView, - staging_belt: wgpu::util::StagingBelt, + camera: Camera, + sky_pipeline: wgpu::RenderPipeline, + entity_pipeline: wgpu::RenderPipeline, + ground_pipeline: wgpu::RenderPipeline, + main_bind_group: wgpu::BindGroup, + camera_buf: wgpu::Buffer, + models: Vec, + depth_view: wgpu::TextureView, + staging_belt: wgpu::util::StagingBelt, } impl Skybox { - const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24Plus; + const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24Plus; - fn create_depth_texture( - config: &wgpu::SurfaceConfiguration, - device: &wgpu::Device, - ) -> wgpu::TextureView { - let depth_texture = device.create_texture(&wgpu::TextureDescriptor { - size: wgpu::Extent3d { - width: config.width, - height: config.height, - depth_or_array_layers: 1, - }, - mip_level_count: 1, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format: Self::DEPTH_FORMAT, - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - label: None, - view_formats: &[], - }); + fn create_depth_texture( + config: &wgpu::SurfaceConfiguration, + device: &wgpu::Device, + ) -> wgpu::TextureView { + let depth_texture = device.create_texture(&wgpu::TextureDescriptor { + size: wgpu::Extent3d { + width: config.width, + height: config.height, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: Self::DEPTH_FORMAT, + usage: wgpu::TextureUsages::RENDER_ATTACHMENT, + label: None, + view_formats: &[], + }); - depth_texture.create_view(&wgpu::TextureViewDescriptor::default()) - } + depth_texture.create_view(&wgpu::TextureViewDescriptor::default()) + } } fn get_transform_uniform_data(transform:&glam::Affine3A) -> [f32; 4*4] { - let mut raw = [0f32; 4*4]; - raw[0..16].copy_from_slice(&AsRef::<[f32; 
4*4]>::as_ref(&glam::Mat4::from(*transform))[..]); - raw + let mut raw = [0f32; 4*4]; + raw[0..16].copy_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&glam::Mat4::from(*transform))[..]); + raw } fn add_obj(device:&wgpu::Device,modeldatas:& mut Vec,source:&[u8]){ - let data = obj::ObjData::load_buf(&source[..]).unwrap(); - let mut vertices = Vec::new(); - let mut vertex_index = std::collections::HashMap::::new(); - for object in data.objects { - let mut entities = Vec::::new(); - for group in object.groups { - let mut indices = Vec::new(); - for poly in group.polys { - for end_index in 2..poly.0.len() { - for &index in &[0, end_index - 1, end_index] { - let vert = poly.0[index]; - if let Some(&i)=vertex_index.get(&vert){ - indices.push(i as u16); - }else{ - let i=vertices.len() as u16; - vertices.push(Vertex { - pos: data.position[vert.0], - texture: data.texture[vert.1.unwrap()], - normal: data.normal[vert.2.unwrap()], - }); - vertex_index.insert(vert,i); - indices.push(i); - } - } - } - } - let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Index"), - contents: bytemuck::cast_slice(&indices), - usage: wgpu::BufferUsages::INDEX, - }); - entities.push(Entity { - index_buf, - index_count: indices.len() as u32, - }); - } - let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Vertex"), - contents: bytemuck::cast_slice(&vertices), - usage: wgpu::BufferUsages::VERTEX, - }); - modeldatas.push(ModelData { - transform: glam::Affine3A::default(), - vertex_buf, - entities, - }) - } + let data = obj::ObjData::load_buf(&source[..]).unwrap(); + let mut vertices = Vec::new(); + let mut vertex_index = std::collections::HashMap::::new(); + for object in data.objects { + let mut entities = Vec::::new(); + for group in object.groups { + let mut indices = Vec::new(); + for poly in group.polys { + for end_index in 2..poly.0.len() { + for &index in &[0, end_index - 1, end_index] { + let vert = poly.0[index]; + if let Some(&i)=vertex_index.get(&vert){ + indices.push(i as u16); + }else{ + let i=vertices.len() as u16; + vertices.push(Vertex { + pos: data.position[vert.0], + texture: data.texture[vert.1.unwrap()], + normal: data.normal[vert.2.unwrap()], + }); + vertex_index.insert(vert,i); + indices.push(i); + } + } + } + } + let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Index"), + contents: bytemuck::cast_slice(&indices), + usage: wgpu::BufferUsages::INDEX, + }); + entities.push(Entity { + index_buf, + index_count: indices.len() as u32, + }); + } + let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Vertex"), + contents: bytemuck::cast_slice(&vertices), + usage: wgpu::BufferUsages::VERTEX, + }); + modeldatas.push(ModelData { + transform: glam::Affine3A::default(), + vertex_buf, + entities, + }) + } } impl strafe_client::framework::Example for Skybox { - fn optional_features() -> wgpu::Features { - wgpu::Features::TEXTURE_COMPRESSION_ASTC - | wgpu::Features::TEXTURE_COMPRESSION_ETC2 - | wgpu::Features::TEXTURE_COMPRESSION_BC - } + fn optional_features() -> wgpu::Features { + wgpu::Features::TEXTURE_COMPRESSION_ASTC + | wgpu::Features::TEXTURE_COMPRESSION_ETC2 + | wgpu::Features::TEXTURE_COMPRESSION_BC + } - fn init( - config: &wgpu::SurfaceConfiguration, - _adapter: &wgpu::Adapter, - device: &wgpu::Device, - queue: &wgpu::Queue, - ) -> Self { - let mut modeldatas = Vec::::new(); - add_obj(device,& mut 
modeldatas,include_bytes!("../models/teslacyberv3.0.obj")); - add_obj(device,& mut modeldatas,include_bytes!("../models/suzanne.obj")); - add_obj(device,& mut modeldatas,include_bytes!("../models/teapot.obj")); - println!("models.len = {:?}", modeldatas.len()); - modeldatas[1].transform=glam::Affine3A::from_translation(glam::vec3(10.,5.,10.)); - modeldatas[2].transform=glam::Affine3A::from_translation(glam::vec3(-10.,5.,10.)); + fn init( + config: &wgpu::SurfaceConfiguration, + _adapter: &wgpu::Adapter, + device: &wgpu::Device, + queue: &wgpu::Queue, + ) -> Self { + let mut modeldatas = Vec::::new(); + add_obj(device,& mut modeldatas,include_bytes!("../models/teslacyberv3.0.obj")); + add_obj(device,& mut modeldatas,include_bytes!("../models/suzanne.obj")); + add_obj(device,& mut modeldatas,include_bytes!("../models/teapot.obj")); + println!("models.len = {:?}", modeldatas.len()); + modeldatas[1].transform=glam::Affine3A::from_translation(glam::vec3(10.,5.,10.)); + modeldatas[2].transform=glam::Affine3A::from_translation(glam::vec3(-10.,5.,10.)); - let main_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Texture { - sample_type: wgpu::TextureSampleType::Float { filterable: true }, - multisampled: false, - view_dimension: wgpu::TextureViewDimension::Cube, - }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 2, - visibility: wgpu::ShaderStages::FRAGMENT, - ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), - count: None, - }, - ], - }); - let model_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: None, - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }, - ], - }); + let main_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + multisampled: false, + view_dimension: wgpu::TextureViewDimension::Cube, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 2, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + }); + let model_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }, + ], + }); - // Create the render 
pipeline - let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { - label: None, - source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))), - }); + // Create the render pipeline + let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { + label: None, + source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))), + }); - let camera = Camera { - time: Instant::now(), - pos: glam::Vec3::new(5.0,0.0,5.0), - vel: glam::Vec3::new(0.0,0.0,0.0), - gravity: glam::Vec3::new(0.0,-100.0,0.0), - friction: 90.0, - screen_size: (config.width, config.height), - offset: glam::Vec3::new(0.0,4.5,0.0), - fov: 1.0, //fov_slope = tan(fov_y/2) - pitch: 0.0, - yaw: 0.0, - mv: 2.7, - controls:0, - grounded: true, - walkspeed: 18.0, - }; - let camera_uniforms = camera.to_uniform_data(); - let camera_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera"), - contents: bytemuck::cast_slice(&camera_uniforms), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); + let camera = Camera { + time: Instant::now(), + pos: glam::Vec3::new(5.0,0.0,5.0), + vel: glam::Vec3::new(0.0,0.0,0.0), + gravity: glam::Vec3::new(0.0,-100.0,0.0), + friction: 90.0, + screen_size: (config.width, config.height), + offset: glam::Vec3::new(0.0,4.5,0.0), + fov: 1.0, //fov_slope = tan(fov_y/2) + pitch: 0.0, + yaw: 0.0, + mv: 2.7, + controls:0, + grounded: true, + walkspeed: 18.0, + }; + let camera_uniforms = camera.to_uniform_data(); + let camera_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera"), + contents: bytemuck::cast_slice(&camera_uniforms), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); - let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { - label: None, - bind_group_layouts: &[&main_bind_group_layout, &model_bind_group_layout], - push_constant_ranges: &[], - }); + let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&main_bind_group_layout, &model_bind_group_layout], + push_constant_ranges: &[], + }); - // Create the render pipelines - let sky_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Sky"), - layout: Some(&pipeline_layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_sky", - buffers: &[], - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_sky", - targets: &[Some(config.view_formats[0].into())], - }), - primitive: wgpu::PrimitiveState { - front_face: wgpu::FrontFace::Cw, - ..Default::default() - }, - depth_stencil: Some(wgpu::DepthStencilState { - format: Self::DEPTH_FORMAT, - depth_write_enabled: false, - depth_compare: wgpu::CompareFunction::LessEqual, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState::default(), - multiview: None, - }); - let entity_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Entity"), - layout: Some(&pipeline_layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_entity", - buffers: &[wgpu::VertexBufferLayout { - array_stride: std::mem::size_of::() as wgpu::BufferAddress, - step_mode: wgpu::VertexStepMode::Vertex, - attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3], - }], - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - 
entry_point: "fs_entity", - targets: &[Some(config.view_formats[0].into())], - }), - primitive: wgpu::PrimitiveState { - front_face: wgpu::FrontFace::Cw, - ..Default::default() - }, - depth_stencil: Some(wgpu::DepthStencilState { - format: Self::DEPTH_FORMAT, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::LessEqual, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState::default(), - multiview: None, - }); - let ground_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("Ground"), - layout: Some(&pipeline_layout), - vertex: wgpu::VertexState { - module: &shader, - entry_point: "vs_ground", - buffers: &[], - }, - fragment: Some(wgpu::FragmentState { - module: &shader, - entry_point: "fs_ground", - targets: &[Some(config.view_formats[0].into())], - }), - primitive: wgpu::PrimitiveState { - front_face: wgpu::FrontFace::Cw, - ..Default::default() - }, - depth_stencil: Some(wgpu::DepthStencilState { - format: Self::DEPTH_FORMAT, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::LessEqual, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }), - multisample: wgpu::MultisampleState::default(), - multiview: None, - }); + // Create the render pipelines + let sky_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Sky"), + layout: Some(&pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_sky", + buffers: &[], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_sky", + targets: &[Some(config.view_formats[0].into())], + }), + primitive: wgpu::PrimitiveState { + front_face: wgpu::FrontFace::Cw, + ..Default::default() + }, + depth_stencil: Some(wgpu::DepthStencilState { + format: Self::DEPTH_FORMAT, + depth_write_enabled: false, + depth_compare: wgpu::CompareFunction::LessEqual, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState::default(), + multiview: None, + }); + let entity_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Entity"), + layout: Some(&pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_entity", + buffers: &[wgpu::VertexBufferLayout { + array_stride: std::mem::size_of::() as wgpu::BufferAddress, + step_mode: wgpu::VertexStepMode::Vertex, + attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3], + }], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_entity", + targets: &[Some(config.view_formats[0].into())], + }), + primitive: wgpu::PrimitiveState { + front_face: wgpu::FrontFace::Cw, + ..Default::default() + }, + depth_stencil: Some(wgpu::DepthStencilState { + format: Self::DEPTH_FORMAT, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::LessEqual, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState::default(), + multiview: None, + }); + let ground_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("Ground"), + layout: Some(&pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: "vs_ground", + buffers: &[], + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: "fs_ground", + targets: 
&[Some(config.view_formats[0].into())], + }), + primitive: wgpu::PrimitiveState { + front_face: wgpu::FrontFace::Cw, + ..Default::default() + }, + depth_stencil: Some(wgpu::DepthStencilState { + format: Self::DEPTH_FORMAT, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::LessEqual, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }), + multisample: wgpu::MultisampleState::default(), + multiview: None, + }); - let sampler = device.create_sampler(&wgpu::SamplerDescriptor { - label: None, - address_mode_u: wgpu::AddressMode::ClampToEdge, - address_mode_v: wgpu::AddressMode::ClampToEdge, - address_mode_w: wgpu::AddressMode::ClampToEdge, - mag_filter: wgpu::FilterMode::Linear, - min_filter: wgpu::FilterMode::Linear, - mipmap_filter: wgpu::FilterMode::Linear, - ..Default::default() - }); + let sampler = device.create_sampler(&wgpu::SamplerDescriptor { + label: None, + address_mode_u: wgpu::AddressMode::ClampToEdge, + address_mode_v: wgpu::AddressMode::ClampToEdge, + address_mode_w: wgpu::AddressMode::ClampToEdge, + mag_filter: wgpu::FilterMode::Linear, + min_filter: wgpu::FilterMode::Linear, + mipmap_filter: wgpu::FilterMode::Linear, + ..Default::default() + }); - let device_features = device.features(); + let device_features = device.features(); - let skybox_format = if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC) { - log::info!("Using ASTC"); - wgpu::TextureFormat::Astc { - block: AstcBlock::B4x4, - channel: AstcChannel::UnormSrgb, - } - } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2) { - log::info!("Using ETC2"); - wgpu::TextureFormat::Etc2Rgb8UnormSrgb - } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC) { - log::info!("Using BC"); - wgpu::TextureFormat::Bc1RgbaUnormSrgb - } else { - log::info!("Using plain"); - wgpu::TextureFormat::Bgra8UnormSrgb - }; + let skybox_format = if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC) { + log::info!("Using ASTC"); + wgpu::TextureFormat::Astc { + block: AstcBlock::B4x4, + channel: AstcChannel::UnormSrgb, + } + } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2) { + log::info!("Using ETC2"); + wgpu::TextureFormat::Etc2Rgb8UnormSrgb + } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC) { + log::info!("Using BC"); + wgpu::TextureFormat::Bc1RgbaUnormSrgb + } else { + log::info!("Using plain"); + wgpu::TextureFormat::Bgra8UnormSrgb + }; - let size = wgpu::Extent3d { - width: IMAGE_SIZE, - height: IMAGE_SIZE, - depth_or_array_layers: 6, - }; + let size = wgpu::Extent3d { + width: IMAGE_SIZE, + height: IMAGE_SIZE, + depth_or_array_layers: 6, + }; - let layer_size = wgpu::Extent3d { - depth_or_array_layers: 1, - ..size - }; - let max_mips = layer_size.max_mips(wgpu::TextureDimension::D2); + let layer_size = wgpu::Extent3d { + depth_or_array_layers: 1, + ..size + }; + let max_mips = layer_size.max_mips(wgpu::TextureDimension::D2); - log::debug!( - "Copying {:?} skybox images of size {}, {}, 6 with {} mips to gpu", - skybox_format, - IMAGE_SIZE, - IMAGE_SIZE, - max_mips, - ); + log::debug!( + "Copying {:?} skybox images of size {}, {}, 6 with {} mips to gpu", + skybox_format, + IMAGE_SIZE, + IMAGE_SIZE, + max_mips, + ); - let bytes = match skybox_format { - wgpu::TextureFormat::Astc { - block: AstcBlock::B4x4, - channel: AstcChannel::UnormSrgb, - } => &include_bytes!("../images/astc.dds")[..], - wgpu::TextureFormat::Etc2Rgb8UnormSrgb => 
&include_bytes!("../images/etc2.dds")[..], - wgpu::TextureFormat::Bc1RgbaUnormSrgb => &include_bytes!("../images/bc1.dds")[..], - wgpu::TextureFormat::Bgra8UnormSrgb => &include_bytes!("../images/bgra.dds")[..], - _ => unreachable!(), - }; + let bytes = match skybox_format { + wgpu::TextureFormat::Astc { + block: AstcBlock::B4x4, + channel: AstcChannel::UnormSrgb, + } => &include_bytes!("../images/astc.dds")[..], + wgpu::TextureFormat::Etc2Rgb8UnormSrgb => &include_bytes!("../images/etc2.dds")[..], + wgpu::TextureFormat::Bc1RgbaUnormSrgb => &include_bytes!("../images/bc1.dds")[..], + wgpu::TextureFormat::Bgra8UnormSrgb => &include_bytes!("../images/bgra.dds")[..], + _ => unreachable!(), + }; - let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap(); + let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap(); - let texture = device.create_texture_with_data( - queue, - &wgpu::TextureDescriptor { - size, - mip_level_count: max_mips, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format: skybox_format, - usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, - label: None, - view_formats: &[], - }, - &image.data, - ); + let texture = device.create_texture_with_data( + queue, + &wgpu::TextureDescriptor { + size, + mip_level_count: max_mips, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: skybox_format, + usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, + label: None, + view_formats: &[], + }, + &image.data, + ); - let texture_view = texture.create_view(&wgpu::TextureViewDescriptor { - label: None, - dimension: Some(wgpu::TextureViewDimension::Cube), - ..wgpu::TextureViewDescriptor::default() - }); - let main_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &main_bind_group_layout, - entries: &[ - wgpu::BindGroupEntry { - binding: 0, - resource: camera_buf.as_entire_binding(), - }, - wgpu::BindGroupEntry { - binding: 1, - resource: wgpu::BindingResource::TextureView(&texture_view), - }, - wgpu::BindGroupEntry { - binding: 2, - resource: wgpu::BindingResource::Sampler(&sampler), - }, - ], - label: Some("Camera"), - }); + let texture_view = texture.create_view(&wgpu::TextureViewDescriptor { + label: None, + dimension: Some(wgpu::TextureViewDimension::Cube), + ..wgpu::TextureViewDescriptor::default() + }); + let main_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &main_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: camera_buf.as_entire_binding(), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::TextureView(&texture_view), + }, + wgpu::BindGroupEntry { + binding: 2, + resource: wgpu::BindingResource::Sampler(&sampler), + }, + ], + label: Some("Camera"), + }); - //drain the modeldata vec so entities can be /moved/ to models.entities - let mut models = Vec::::with_capacity(modeldatas.len()); - for (i,modeldata) in modeldatas.drain(..).enumerate() { - let model_uniforms = get_transform_uniform_data(&modeldata.transform); - let model_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some(format!("Model{}",i).as_str()), - contents: bytemuck::cast_slice(&model_uniforms), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - let model_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &model_bind_group_layout, - entries: &[ - wgpu::BindGroupEntry { - binding: 0, - resource: 
model_buf.as_entire_binding(), - }, - ], - label: Some(format!("Model{}",i).as_str()), - }); - //all of these are being moved here - models.push(Model{ - transform: modeldata.transform, - vertex_buf:modeldata.vertex_buf, - entities: modeldata.entities, - bind_group: model_bind_group, - model_buf: model_buf, - }) - } + //drain the modeldata vec so entities can be /moved/ to models.entities + let mut models = Vec::::with_capacity(modeldatas.len()); + for (i,modeldata) in modeldatas.drain(..).enumerate() { + let model_uniforms = get_transform_uniform_data(&modeldata.transform); + let model_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some(format!("Model{}",i).as_str()), + contents: bytemuck::cast_slice(&model_uniforms), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + let model_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &model_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: model_buf.as_entire_binding(), + }, + ], + label: Some(format!("Model{}",i).as_str()), + }); + //all of these are being moved here + models.push(Model{ + transform: modeldata.transform, + vertex_buf:modeldata.vertex_buf, + entities: modeldata.entities, + bind_group: model_bind_group, + model_buf: model_buf, + }) + } - let depth_view = Self::create_depth_texture(config, device); + let depth_view = Self::create_depth_texture(config, device); - Skybox { - camera, - sky_pipeline, - entity_pipeline, - ground_pipeline, - main_bind_group, - camera_buf, - models, - depth_view, - staging_belt: wgpu::util::StagingBelt::new(0x100), - } - } + Skybox { + camera, + sky_pipeline, + entity_pipeline, + ground_pipeline, + main_bind_group, + camera_buf, + models, + depth_view, + staging_belt: wgpu::util::StagingBelt::new(0x100), + } + } - #[allow(clippy::single_match)] - fn update(&mut self, event: winit::event::WindowEvent) { - match event { - winit::event::WindowEvent::KeyboardInput { - input: - winit::event::KeyboardInput { - state, - virtual_keycode: Some(keycode), - .. - }, - .. 
-			} => {
-				match (state,keycode) {
-					(k,winit::event::VirtualKeyCode::W) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEFORWARD,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEFORWARD,
-					}
-					(k,winit::event::VirtualKeyCode::A) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVELEFT,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVELEFT,
-					}
-					(k,winit::event::VirtualKeyCode::S) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEBACK,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEBACK,
-					}
-					(k,winit::event::VirtualKeyCode::D) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVERIGHT,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVERIGHT,
-					}
-					(k,winit::event::VirtualKeyCode::E) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEUP,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEUP,
-					}
-					(k,winit::event::VirtualKeyCode::Q) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEDOWN,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEDOWN,
-					}
-					(k,winit::event::VirtualKeyCode::Space) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_JUMP,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_JUMP,
-					}
-					(k,winit::event::VirtualKeyCode::Z) => match k {
-						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_ZOOM,
-						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_ZOOM,
-					}
-					_ => (),
-				}
-			}
-			_ => {}
-		}
-	}
+	#[allow(clippy::single_match)]
+	fn update(&mut self, event: winit::event::WindowEvent) {
+		match event {
+			winit::event::WindowEvent::KeyboardInput {
+				input:
+					winit::event::KeyboardInput {
+						state,
+						virtual_keycode: Some(keycode),
+						..
+					},
+				..
+			} => {
+				match (state,keycode) {
+					(k,winit::event::VirtualKeyCode::W) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEFORWARD,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEFORWARD,
+					}
+					(k,winit::event::VirtualKeyCode::A) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVELEFT,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVELEFT,
+					}
+					(k,winit::event::VirtualKeyCode::S) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEBACK,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEBACK,
+					}
+					(k,winit::event::VirtualKeyCode::D) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVERIGHT,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVERIGHT,
+					}
+					(k,winit::event::VirtualKeyCode::E) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEUP,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEUP,
+					}
+					(k,winit::event::VirtualKeyCode::Q) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEDOWN,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEDOWN,
+					}
+					(k,winit::event::VirtualKeyCode::Space) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_JUMP,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_JUMP,
+					}
+					(k,winit::event::VirtualKeyCode::Z) => match k {
+						winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_ZOOM,
+						winit::event::ElementState::Released => self.camera.controls&=!CONTROL_ZOOM,
+					}
+					_ => (),
+				}
+			}
+			_ => {}
+		}
+	}

-	fn move_mouse(&mut self, delta: (f64,f64)) {
-		self.camera.pitch=(self.camera.pitch as f64+delta.1/-512.) as f32;
-		self.camera.yaw=(self.camera.yaw as f64+delta.0/-512.) as f32;
-	}
+	fn move_mouse(&mut self, delta: (f64,f64)) {
+		self.camera.pitch=(self.camera.pitch as f64+delta.1/-512.) as f32;
+		self.camera.yaw=(self.camera.yaw as f64+delta.0/-512.) as f32;
+	}

-	fn resize(
-		&mut self,
-		config: &wgpu::SurfaceConfiguration,
-		device: &wgpu::Device,
-		_queue: &wgpu::Queue,
-	) {
-		self.depth_view = Self::create_depth_texture(config, device);
-		self.camera.screen_size = (config.width, config.height);
-	}
+	fn resize(
+		&mut self,
+		config: &wgpu::SurfaceConfiguration,
+		device: &wgpu::Device,
+		_queue: &wgpu::Queue,
+	) {
+		self.depth_view = Self::create_depth_texture(config, device);
+		self.camera.screen_size = (config.width, config.height);
+	}

-	fn render(
-		&mut self,
-		view: &wgpu::TextureView,
-		device: &wgpu::Device,
-		queue: &wgpu::Queue,
-		_spawner: &strafe_client::framework::Spawner,
-	) {
-		let time = Instant::now();
+	fn render(
+		&mut self,
+		view: &wgpu::TextureView,
+		device: &wgpu::Device,
+		queue: &wgpu::Queue,
+		_spawner: &strafe_client::framework::Spawner,
+	) {
+		let time = Instant::now();

-		//physique
-		let dt=(time-self.camera.time).as_secs_f32();
-		self.camera.time=time;
-		let camera_mat=glam::Mat3::from_euler(glam::EulerRot::YXZ,self.camera.yaw,0f32,0f32);
-		let control_dir=camera_mat*get_control_dir(self.camera.controls&(CONTROL_MOVELEFT|CONTROL_MOVERIGHT|CONTROL_MOVEFORWARD|CONTROL_MOVEBACK)).normalize_or_zero();
-		let d=self.camera.vel.dot(control_dir);
-		if d(
-		format!("Strafe Client v{}",
-			env!("CARGO_PKG_VERSION")
-		).as_str()
-	);
+	strafe_client::framework::run::<Skybox>(
+		format!("Strafe Client v{}",
+			env!("CARGO_PKG_VERSION")
+		).as_str()
+	);
 }
diff --git a/src/shader.wgsl b/src/shader.wgsl
index dd55d4e..a60fc5c 100644
--- a/src/shader.wgsl
+++ b/src/shader.wgsl
@@ -1,17 +1,17 @@
 struct SkyOutput {
-	@builtin(position) position: vec4<f32>,
-	@location(0) sampledir: vec3<f32>,
+	@builtin(position) position: vec4<f32>,
+	@location(0) sampledir: vec3<f32>,
 };

 struct Data {
-	// from camera to screen
-	proj: mat4x4<f32>,
-	// from screen to camera
-	proj_inv: mat4x4<f32>,
-	// from world to camera
-	view: mat4x4<f32>,
-	// camera position
-	cam_pos: vec4<f32>,
+	// from camera to screen
+	proj: mat4x4<f32>,
+	// from screen to camera
+	proj_inv: mat4x4<f32>,
+	// from world to camera
+	view: mat4x4<f32>,
+	// camera position
+	cam_pos: vec4<f32>,
 };

 @group(0)
 @binding(0)
@@ -19,53 +19,53 @@ var<uniform> r_data: Data;

 @vertex
 fn vs_sky(@builtin(vertex_index) vertex_index: u32) -> SkyOutput {
-	// hacky way to draw a large triangle
-	let tmp1 = i32(vertex_index) / 2;
-	let tmp2 = i32(vertex_index) & 1;
-	let pos = vec4<f32>(
-		f32(tmp1) * 4.0 - 1.0,
-		f32(tmp2) * 4.0 - 1.0,
-		1.0,
-		1.0
-	);
+	// hacky way to draw a large triangle
+	let tmp1 = i32(vertex_index) / 2;
+	let tmp2 = i32(vertex_index) & 1;
+	let pos = vec4<f32>(
+		f32(tmp1) * 4.0 - 1.0,
+		f32(tmp2) * 4.0 - 1.0,
+		1.0,
+		1.0
+	);

-	// transposition = inversion for this orthonormal matrix
-	let inv_model_view = transpose(mat3x3<f32>(r_data.view[0].xyz, r_data.view[1].xyz, r_data.view[2].xyz));
-	let unprojected = r_data.proj_inv * pos;
+	// transposition = inversion for this orthonormal matrix
+	let inv_model_view = transpose(mat3x3<f32>(r_data.view[0].xyz, r_data.view[1].xyz, r_data.view[2].xyz));
+	let unprojected = r_data.proj_inv * pos;

-	var result: SkyOutput;
-	result.sampledir = inv_model_view * unprojected.xyz;
-	result.position = pos;
-	return result;
+	var result: SkyOutput;
+	result.sampledir = inv_model_view * unprojected.xyz;
+	result.position = pos;
+	return result;
 }

 struct GroundOutput {
-	@builtin(position) position: vec4<f32>,
-	@location(4) pos: vec3<f32>,
+	@builtin(position) position: vec4<f32>,
+	@location(4) pos: vec3<f32>,
 };

 @vertex
 fn vs_ground(@builtin(vertex_index) vertex_index: u32) -> GroundOutput {
-	// hacky way to draw two triangles that make a square
-	let tmp1 = i32(vertex_index)/2-i32(vertex_index)/3;
-	let tmp2 = i32(vertex_index)&1;
-	let pos = vec3<f32>(
-		f32(tmp1) * 2.0 - 1.0,
-		0.0,
-		f32(tmp2) * 2.0 - 1.0
-	) * 160.0;
+	// hacky way to draw two triangles that make a square
+	let tmp1 = i32(vertex_index)/2-i32(vertex_index)/3;
+	let tmp2 = i32(vertex_index)&1;
+	let pos = vec3<f32>(
+		f32(tmp1) * 2.0 - 1.0,
+		0.0,
+		f32(tmp2) * 2.0 - 1.0
+	) * 160.0;

-	var result: GroundOutput;
-	result.pos = pos;
-	result.position = r_data.proj * r_data.view * vec4<f32>(pos, 1.0);
-	return result;
+	var result: GroundOutput;
+	result.pos = pos;
+	result.position = r_data.proj * r_data.view * vec4<f32>(pos, 1.0);
+	return result;
 }

 struct EntityOutput {
-	@builtin(position) position: vec4<f32>,
-	@location(1) texture: vec2<f32>,
-	@location(2) normal: vec3<f32>,
-	@location(3) view: vec3<f32>,
+	@builtin(position) position: vec4<f32>,
+	@location(1) texture: vec2<f32>,
+	@location(2) normal: vec3<f32>,
+	@location(3) view: vec3<f32>,
 };

 @group(1)
@@ -74,17 +74,17 @@ var<uniform> r_EntityTransform: mat4x4<f32>;

 @vertex
 fn vs_entity(
-	@location(0) pos: vec3<f32>,
-	@location(1) texture: vec2<f32>,
-	@location(2) normal: vec3<f32>,
+	@location(0) pos: vec3<f32>,
+	@location(1) texture: vec2<f32>,
+	@location(2) normal: vec3<f32>,
 ) -> EntityOutput {
-	var position: vec4<f32> = r_EntityTransform * vec4<f32>(pos, 1.0);
-	var result: EntityOutput;
-	result.normal = (r_EntityTransform * vec4<f32>(normal, 0.0)).xyz;
-	result.texture=texture;
-	result.view = position.xyz - r_data.cam_pos.xyz;
-	result.position = r_data.proj * r_data.view * position;
-	return result;
+	var position: vec4<f32> = r_EntityTransform * vec4<f32>(pos, 1.0);
+	var result: EntityOutput;
+	result.normal = (r_EntityTransform * vec4<f32>(normal, 0.0)).xyz;
+	result.texture=texture;
+	result.view = position.xyz - r_data.cam_pos.xyz;
+	result.position = r_data.proj * r_data.view * position;
+	return result;
 }

 @group(0)
@@ -96,36 +96,36 @@ var r_sampler: sampler;

 @fragment
 fn fs_sky(vertex: SkyOutput) -> @location(0) vec4<f32> {
-	return textureSample(r_texture, r_sampler, vertex.sampledir);
+	return textureSample(r_texture, r_sampler, vertex.sampledir);
 }

 @fragment
 fn fs_entity(vertex: EntityOutput) -> @location(0) vec4<f32> {
-	let incident = normalize(vertex.view);
-	let normal = normalize(vertex.normal);
-	let d = dot(normal, incident);
-	let reflected = incident - 2.0 * d * normal;
+	let incident = normalize(vertex.view);
+	let normal = normalize(vertex.normal);
+	let d = dot(normal, incident);
+	let reflected = incident - 2.0 * d * normal;

-	let dir = vec3<f32>(-1.0)+2.0*vec3<f32>(vertex.texture.x,0.0,vertex.texture.y);
-	let texture_color = textureSample(r_texture, r_sampler, dir).rgb;
-	let reflected_color = textureSample(r_texture, r_sampler, reflected).rgb;
-	return vec4<f32>(mix(vec3<f32>(0.1) + 0.5 * reflected_color,texture_color,1.0-pow(1.0-abs(d),2.0)), 1.0);
+	let dir = vec3<f32>(-1.0)+2.0*vec3<f32>(vertex.texture.x,0.0,vertex.texture.y);
+	let texture_color = textureSample(r_texture, r_sampler, dir).rgb;
+	let reflected_color = textureSample(r_texture, r_sampler, reflected).rgb;
+	return vec4<f32>(mix(vec3<f32>(0.1) + 0.5 * reflected_color,texture_color,1.0-pow(1.0-abs(d),2.0)), 1.0);
 }

 fn modulo_euclidean (a: f32, b: f32) -> f32 {
-	var m = a % b;
-	if (m < 0.0) {
-		if (b < 0.0) {
-			m -= b;
-		} else {
-			m += b;
-		}
-	}
-	return m;
+	var m = a % b;
+	if (m < 0.0) {
+		if (b < 0.0) {
+			m -= b;
+		} else {
+			m += b;
+		}
+	}
+	return m;
 }

 @fragment
 fn fs_ground(vertex: GroundOutput) -> @location(0) vec4<f32> {
-	let dir = vec3<f32>(-1.0)+vec3<f32>(modulo_euclidean(vertex.pos.x/16.,1.0),0.0,modulo_euclidean(vertex.pos.z/16.,1.0))*2.0;
-	return vec4<f32>(textureSample(r_texture, r_sampler, dir).rgb, 1.0);
+	let dir = vec3<f32>(-1.0)+vec3<f32>(modulo_euclidean(vertex.pos.x/16.,1.0),0.0,modulo_euclidean(vertex.pos.z/16.,1.0))*2.0;
+	return vec4<f32>(textureSample(r_texture, r_sampler, dir).rgb, 1.0);
 }
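Note on the modulo_euclidean helper used by fs_ground above: WGSL's % operator truncates toward zero, so a % b is negative whenever a is negative, while the ground shader needs the world coordinate wrapped into [0, 1) before it is remapped to a cubemap sample direction. The Rust sketch below illustrates the same idea under that assumption; it is not part of the patch, and Rust already provides the equivalent as f32::rem_euclid.

// Euclidean remainder: always lands in [0, |b|), unlike the truncating `%`,
// which keeps the sign of the dividend. Mirrors the WGSL helper in shader.wgsl.
fn modulo_euclidean(a: f32, b: f32) -> f32 {
    let m = a % b;
    if m < 0.0 {
        if b < 0.0 { m - b } else { m + b }
    } else {
        m
    }
}

fn main() {
    // fs_ground divides the world position by 16 and wraps it into [0, 1)
    // before building the sample direction; a negative coordinate must still
    // wrap forward, not mirror around zero.
    assert_eq!(modulo_euclidean(-3.5, 16.0), 12.5);
    assert_eq!((-3.5f32).rem_euclid(16.0), 12.5); // standard-library equivalent
}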