forked from StrafesNET/strafe-client
Compare commits
4 Commits
master
...
depth-fudg
Author | SHA1 | Date | |
---|---|---|---|
67c4aa07ab | |||
34bfeab267 | |||
52cbda4710 | |||
9d8faeb6fb |
@ -1,2 +0,0 @@
|
||||
[registries.strafesnet]
|
||||
index = "sparse+https://git.itzana.me/api/packages/strafesnet/cargo/"
|
2489
Cargo.lock
generated
2489
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
31
Cargo.toml
31
Cargo.toml
@ -1,36 +1,23 @@
|
||||
[package]
|
||||
name = "strafe-client"
|
||||
version = "0.10.5"
|
||||
version = "0.2.0"
|
||||
edition = "2021"
|
||||
repository = "https://git.itzana.me/StrafesNET/strafe-client"
|
||||
license = "Custom"
|
||||
description = "StrafesNET game client for bhop and surf."
|
||||
authors = ["Rhys Lloyd <krakow20@gmail.com>"]
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[features]
|
||||
default = ["snf"]
|
||||
snf = ["dep:strafesnet_snf"]
|
||||
source = ["dep:strafesnet_deferred_loader", "dep:strafesnet_bsp_loader"]
|
||||
roblox = ["dep:strafesnet_deferred_loader", "dep:strafesnet_rbx_loader"]
|
||||
|
||||
[dependencies]
|
||||
async-executor = "1.5.1"
|
||||
bytemuck = { version = "1.13.1", features = ["derive"] }
|
||||
configparser = "3.0.2"
|
||||
ddsfile = "0.5.1"
|
||||
glam = "0.29.0"
|
||||
id = { version = "0.1.0", registry = "strafesnet" }
|
||||
parking_lot = "0.12.1"
|
||||
env_logger = "0.10.0"
|
||||
glam = "0.24.1"
|
||||
log = "0.4.20"
|
||||
obj = "0.10.2"
|
||||
pollster = "0.3.0"
|
||||
strafesnet_bsp_loader = { version = "0.2.1", registry = "strafesnet", optional = true }
|
||||
strafesnet_common = { version = "0.5.2", registry = "strafesnet" }
|
||||
strafesnet_deferred_loader = { version = "0.4.0", features = ["legacy"], registry = "strafesnet", optional = true }
|
||||
strafesnet_rbx_loader = { version = "0.5.1", registry = "strafesnet", optional = true }
|
||||
strafesnet_snf = { version = "0.2.0", registry = "strafesnet", optional = true }
|
||||
wgpu = "22.1.0"
|
||||
winit = "0.30.5"
|
||||
wgpu = "0.17.0"
|
||||
winit = "0.28.6"
|
||||
|
||||
[profile.release]
|
||||
#lto = true
|
||||
lto = true
|
||||
strip = true
|
||||
codegen-units = 1
|
||||
|
2
LICENSE
2
LICENSE
@ -1,5 +1,5 @@
|
||||
/*******************************************************
|
||||
* Copyright (C) 2023-2024 Rhys Lloyd <krakow20@gmail.com>
|
||||
* Copyright (C) 2023 Rhys Lloyd <krakow20@gmail.com>
|
||||
*
|
||||
* This file is part of the StrafesNET bhop/surf client.
|
||||
*
|
||||
|
BIN
images/squid.dds
BIN
images/squid.dds
Binary file not shown.
@ -1,21 +0,0 @@
|
||||
pub type QNWorker<'a,Task>=CompatNWorker<'a,Task>;
|
||||
pub type INWorker<'a,Task>=CompatNWorker<'a,Task>;
|
||||
|
||||
pub struct CompatNWorker<'a,Task>{
|
||||
data:std::marker::PhantomData<Task>,
|
||||
f:Box<dyn FnMut(Task)+Send+'a>,
|
||||
}
|
||||
|
||||
impl<'a,Task> CompatNWorker<'a,Task>{
|
||||
pub fn new(f:impl FnMut(Task)+Send+'a)->CompatNWorker<'a,Task>{
|
||||
Self{
|
||||
data:std::marker::PhantomData,
|
||||
f:Box::new(f),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send(&mut self,task:Task)->Result<(),()>{
|
||||
(self.f)(task);
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,127 +0,0 @@
|
||||
use crate::physics::Body;
|
||||
use crate::model_physics::{GigaTime,FEV,MeshQuery,DirectedEdge,MinkowskiMesh,MinkowskiFace,MinkowskiDirectedEdge,MinkowskiVert};
|
||||
use strafesnet_common::integer::{Time,Fixed,Ratio};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Transition<F,E:DirectedEdge,V>{
|
||||
Miss,
|
||||
Next(FEV<F,E,V>,GigaTime),
|
||||
Hit(F,GigaTime),
|
||||
}
|
||||
|
||||
type MinkowskiFEV=FEV<MinkowskiFace,MinkowskiDirectedEdge,MinkowskiVert>;
|
||||
type MinkowskiTransition=Transition<MinkowskiFace,MinkowskiDirectedEdge,MinkowskiVert>;
|
||||
|
||||
fn next_transition(fev:&MinkowskiFEV,body_time:GigaTime,mesh:&MinkowskiMesh,body:&Body,mut best_time:GigaTime)->MinkowskiTransition{
|
||||
//conflicting derivative means it crosses in the wrong direction.
|
||||
//if the transition time is equal to an already tested transition, do not replace the current best.
|
||||
let mut best_transition=MinkowskiTransition::Miss;
|
||||
match fev{
|
||||
&MinkowskiFEV::Face(face_id)=>{
|
||||
//test own face collision time, ignoring roots with zero or conflicting derivative
|
||||
//n=face.normal d=face.dot
|
||||
//n.a t^2+n.v t+n.p-d==0
|
||||
let (n,d)=mesh.face_nd(face_id);
|
||||
//TODO: use higher precision d value?
|
||||
//use the mesh transform translation instead of baking it into the d value.
|
||||
for dt in Fixed::<4,128>::zeroes2((n.dot(body.position)-d)*2,n.dot(body.velocity)*2,n.dot(body.acceleration)){
|
||||
if body_time.le_ratio(dt)&&dt.lt_ratio(best_time)&&n.dot(body.extrapolated_velocity_ratio_dt(dt)).is_negative(){
|
||||
best_time=dt;
|
||||
best_transition=MinkowskiTransition::Hit(face_id,dt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
//test each edge collision time, ignoring roots with zero or conflicting derivative
|
||||
for &directed_edge_id in mesh.face_edges(face_id).iter(){
|
||||
let edge_n=mesh.directed_edge_n(directed_edge_id);
|
||||
let n=n.cross(edge_n);
|
||||
let verts=mesh.edge_verts(directed_edge_id.as_undirected());
|
||||
//WARNING: d is moved out of the *2 block because of adding two vertices!
|
||||
//WARNING: precision is swept under the rug!
|
||||
for dt in Fixed::<4,128>::zeroes2(n.dot(body.position*2-(mesh.vert(verts[0])+mesh.vert(verts[1]))).fix_4(),n.dot(body.velocity).fix_4()*2,n.dot(body.acceleration).fix_4()){
|
||||
if body_time.le_ratio(dt)&&dt.lt_ratio(best_time)&&n.dot(body.extrapolated_velocity_ratio_dt(dt)).is_negative(){
|
||||
best_time=dt;
|
||||
best_transition=MinkowskiTransition::Next(MinkowskiFEV::Edge(directed_edge_id.as_undirected()),dt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
//if none:
|
||||
},
|
||||
&MinkowskiFEV::Edge(edge_id)=>{
|
||||
//test each face collision time, ignoring roots with zero or conflicting derivative
|
||||
let edge_n=mesh.edge_n(edge_id);
|
||||
let edge_verts=mesh.edge_verts(edge_id);
|
||||
let delta_pos=body.position*2-(mesh.vert(edge_verts[0])+mesh.vert(edge_verts[1]));
|
||||
for (i,&edge_face_id) in mesh.edge_faces(edge_id).iter().enumerate(){
|
||||
let face_n=mesh.face_nd(edge_face_id).0;
|
||||
//edge_n gets parity from the order of edge_faces
|
||||
let n=face_n.cross(edge_n)*((i as i64)*2-1);
|
||||
//WARNING yada yada d *2
|
||||
for dt in Fixed::<4,128>::zeroes2(n.dot(delta_pos).fix_4(),n.dot(body.velocity).fix_4()*2,n.dot(body.acceleration).fix_4()){
|
||||
if body_time.le_ratio(dt)&&dt.lt_ratio(best_time)&&n.dot(body.extrapolated_velocity_ratio_dt(dt)).is_negative(){
|
||||
best_time=dt;
|
||||
best_transition=MinkowskiTransition::Next(MinkowskiFEV::Face(edge_face_id),dt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
//test each vertex collision time, ignoring roots with zero or conflicting derivative
|
||||
for (i,&vert_id) in edge_verts.iter().enumerate(){
|
||||
//vertex normal gets parity from vert index
|
||||
let n=edge_n*(1-2*(i as i64));
|
||||
for dt in Fixed::<2,64>::zeroes2((n.dot(body.position-mesh.vert(vert_id)))*2,n.dot(body.velocity)*2,n.dot(body.acceleration)){
|
||||
if body_time.le_ratio(dt)&&dt.lt_ratio(best_time)&&n.dot(body.extrapolated_velocity_ratio_dt(dt)).is_negative(){
|
||||
let dt=Ratio::new(dt.num.fix_4(),dt.den.fix_4());
|
||||
best_time=dt;
|
||||
best_transition=MinkowskiTransition::Next(MinkowskiFEV::Vert(vert_id),dt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
//if none:
|
||||
},
|
||||
&MinkowskiFEV::Vert(vert_id)=>{
|
||||
//test each edge collision time, ignoring roots with zero or conflicting derivative
|
||||
for &directed_edge_id in mesh.vert_edges(vert_id).iter(){
|
||||
//edge is directed away from vertex, but we want the dot product to turn out negative
|
||||
let n=-mesh.directed_edge_n(directed_edge_id);
|
||||
for dt in Fixed::<2,64>::zeroes2((n.dot(body.position-mesh.vert(vert_id)))*2,n.dot(body.velocity)*2,n.dot(body.acceleration)){
|
||||
if body_time.le_ratio(dt)&&dt.lt_ratio(best_time)&&n.dot(body.extrapolated_velocity_ratio_dt(dt)).is_negative(){
|
||||
let dt=Ratio::new(dt.num.fix_4(),dt.den.fix_4());
|
||||
best_time=dt;
|
||||
best_transition=MinkowskiTransition::Next(MinkowskiFEV::Edge(directed_edge_id.as_undirected()),dt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
//if none:
|
||||
},
|
||||
}
|
||||
best_transition
|
||||
}
|
||||
pub enum CrawlResult<F,E:DirectedEdge,V>{
|
||||
Miss(FEV<F,E,V>),
|
||||
Hit(F,GigaTime),
|
||||
}
|
||||
type MinkowskiCrawlResult=CrawlResult<MinkowskiFace,MinkowskiDirectedEdge,MinkowskiVert>;
|
||||
pub fn crawl_fev(mut fev:MinkowskiFEV,mesh:&MinkowskiMesh,relative_body:&Body,start_time:Time,time_limit:Time)->MinkowskiCrawlResult{
|
||||
let mut body_time={
|
||||
let r=(start_time-relative_body.time).to_ratio();
|
||||
Ratio::new(r.num.fix_4(),r.den.fix_4())
|
||||
};
|
||||
let time_limit={
|
||||
let r=(time_limit-relative_body.time).to_ratio();
|
||||
Ratio::new(r.num.fix_4(),r.den.fix_4())
|
||||
};
|
||||
for _ in 0..20{
|
||||
match next_transition(&fev,body_time,mesh,relative_body,time_limit){
|
||||
Transition::Miss=>return CrawlResult::Miss(fev),
|
||||
Transition::Next(next_fev,next_time)=>(fev,body_time)=(next_fev,next_time),
|
||||
Transition::Hit(face,time)=>return CrawlResult::Hit(face,time),
|
||||
}
|
||||
}
|
||||
//TODO: fix all bugs
|
||||
//println!("Too many iterations! Using default behaviour instead of crashing...");
|
||||
CrawlResult::Miss(fev)
|
||||
}
|
144
src/file.rs
144
src/file.rs
@ -1,144 +0,0 @@
|
||||
use std::io::Read;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ReadError{
|
||||
#[cfg(feature="roblox")]
|
||||
Roblox(strafesnet_rbx_loader::ReadError),
|
||||
#[cfg(feature="source")]
|
||||
Source(strafesnet_bsp_loader::ReadError),
|
||||
#[cfg(feature="snf")]
|
||||
StrafesNET(strafesnet_snf::Error),
|
||||
#[cfg(feature="snf")]
|
||||
StrafesNETMap(strafesnet_snf::map::Error),
|
||||
Io(std::io::Error),
|
||||
UnknownFileFormat,
|
||||
}
|
||||
impl std::fmt::Display for ReadError{
|
||||
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
|
||||
write!(f,"{self:?}")
|
||||
}
|
||||
}
|
||||
impl std::error::Error for ReadError{}
|
||||
|
||||
pub enum DataStructure{
|
||||
#[cfg(feature="roblox")]
|
||||
Roblox(strafesnet_rbx_loader::Model),
|
||||
#[cfg(feature="source")]
|
||||
Source(strafesnet_bsp_loader::Bsp),
|
||||
#[cfg(feature="snf")]
|
||||
StrafesNET(strafesnet_common::map::CompleteMap),
|
||||
}
|
||||
|
||||
pub fn read<R:Read+std::io::Seek>(input:R)->Result<DataStructure,ReadError>{
|
||||
let mut buf=std::io::BufReader::new(input);
|
||||
let peek=std::io::BufRead::fill_buf(&mut buf).map_err(ReadError::Io)?;
|
||||
match &peek[0..4]{
|
||||
#[cfg(feature="roblox")]
|
||||
b"<rob"=>Ok(DataStructure::Roblox(strafesnet_rbx_loader::read(buf).map_err(ReadError::Roblox)?)),
|
||||
#[cfg(feature="source")]
|
||||
b"VBSP"=>Ok(DataStructure::Source(strafesnet_bsp_loader::read(buf).map_err(ReadError::Source)?)),
|
||||
#[cfg(feature="snf")]
|
||||
b"SNFM"=>Ok(DataStructure::StrafesNET(
|
||||
strafesnet_snf::read_map(buf).map_err(ReadError::StrafesNET)?
|
||||
.into_complete_map().map_err(ReadError::StrafesNETMap)?
|
||||
)),
|
||||
_=>Err(ReadError::UnknownFileFormat),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum LoadError{
|
||||
ReadError(ReadError),
|
||||
File(std::io::Error),
|
||||
Io(std::io::Error),
|
||||
}
|
||||
impl std::fmt::Display for LoadError{
|
||||
fn fmt(&self,f:&mut std::fmt::Formatter<'_>)->std::fmt::Result{
|
||||
write!(f,"{self:?}")
|
||||
}
|
||||
}
|
||||
impl std::error::Error for LoadError{}
|
||||
|
||||
pub fn load<P:AsRef<std::path::Path>>(path:P)->Result<strafesnet_common::map::CompleteMap,LoadError>{
|
||||
//blocking because it's simpler...
|
||||
let file=std::fs::File::open(path).map_err(LoadError::File)?;
|
||||
match read(file).map_err(LoadError::ReadError)?{
|
||||
#[cfg(feature="snf")]
|
||||
DataStructure::StrafesNET(map)=>Ok(map),
|
||||
#[cfg(feature="roblox")]
|
||||
DataStructure::Roblox(model)=>{
|
||||
let mut place=model.into_place();
|
||||
place.run_scripts();
|
||||
|
||||
let mut loader=strafesnet_deferred_loader::roblox_legacy();
|
||||
|
||||
let (texture_loader,mesh_loader)=loader.get_inner_mut();
|
||||
|
||||
let map_step1=strafesnet_rbx_loader::convert(
|
||||
&place,
|
||||
|name|texture_loader.acquire_render_config_id(name),
|
||||
|name|mesh_loader.acquire_mesh_id(name),
|
||||
);
|
||||
|
||||
let meshpart_meshes=mesh_loader.load_meshes().map_err(LoadError::Io)?;
|
||||
|
||||
let map_step2=map_step1.add_meshpart_meshes_and_calculate_attributes(
|
||||
meshpart_meshes.into_iter().map(|(mesh_id,loader_model)|
|
||||
(mesh_id,strafesnet_rbx_loader::data::RobloxMeshBytes::new(loader_model.get()))
|
||||
)
|
||||
);
|
||||
|
||||
let (textures,render_configs)=loader.into_render_configs().map_err(LoadError::Io)?.consume();
|
||||
|
||||
let map=map_step2.add_render_configs_and_textures(
|
||||
render_configs.into_iter(),
|
||||
textures.into_iter().map(|(texture_id,texture)|
|
||||
(texture_id,match texture{
|
||||
strafesnet_deferred_loader::texture::Texture::ImageDDS(data)=>data,
|
||||
})
|
||||
)
|
||||
);
|
||||
|
||||
Ok(map)
|
||||
},
|
||||
#[cfg(feature="source")]
|
||||
DataStructure::Source(bsp)=>{
|
||||
let mut loader=strafesnet_deferred_loader::source_legacy();
|
||||
|
||||
let (texture_loader,mesh_loader)=loader.get_inner_mut();
|
||||
|
||||
let map_step1=strafesnet_bsp_loader::convert(
|
||||
&bsp,
|
||||
|name|texture_loader.acquire_render_config_id(name),
|
||||
|name|mesh_loader.acquire_mesh_id(name),
|
||||
);
|
||||
|
||||
let prop_meshes=mesh_loader.load_meshes(bsp.as_ref());
|
||||
|
||||
let map_step2=map_step1.add_prop_meshes(
|
||||
//the type conflagulator 9000
|
||||
prop_meshes.into_iter().map(|(mesh_id,loader_model)|
|
||||
(mesh_id,strafesnet_bsp_loader::data::ModelData{
|
||||
mdl:strafesnet_bsp_loader::data::MdlData::new(loader_model.mdl.get()),
|
||||
vtx:strafesnet_bsp_loader::data::VtxData::new(loader_model.vtx.get()),
|
||||
vvd:strafesnet_bsp_loader::data::VvdData::new(loader_model.vvd.get()),
|
||||
})
|
||||
),
|
||||
|name|texture_loader.acquire_render_config_id(name),
|
||||
);
|
||||
|
||||
let (textures,render_configs)=loader.into_render_configs().map_err(LoadError::Io)?.consume();
|
||||
|
||||
let map=map_step2.add_render_configs_and_textures(
|
||||
render_configs.into_iter(),
|
||||
textures.into_iter().map(|(texture_id,texture)|
|
||||
(texture_id,match texture{
|
||||
strafesnet_deferred_loader::texture::Texture::ImageDDS(data)=>data,
|
||||
})
|
||||
),
|
||||
);
|
||||
|
||||
Ok(map)
|
||||
},
|
||||
}
|
||||
}
|
516
src/framework.rs
Normal file
516
src/framework.rs
Normal file
@ -0,0 +1,516 @@
|
||||
use std::future::Future;
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
use std::str::FromStr;
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
use std::time::Instant;
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
use web_sys::{ImageBitmapRenderingContext, OffscreenCanvas};
|
||||
use winit::{
|
||||
event::{self, WindowEvent},
|
||||
event_loop::{ControlFlow, EventLoop},
|
||||
};
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
|
||||
use std::{mem::size_of, slice::from_raw_parts};
|
||||
|
||||
unsafe { from_raw_parts(data.as_ptr() as *const u8, data.len() * size_of::<T>()) }
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub enum ShaderStage {
|
||||
Vertex,
|
||||
Fragment,
|
||||
Compute,
|
||||
}
|
||||
|
||||
pub trait Example: 'static + Sized {
|
||||
fn optional_features() -> wgpu::Features {
|
||||
wgpu::Features::empty()
|
||||
}
|
||||
fn required_features() -> wgpu::Features {
|
||||
wgpu::Features::empty()
|
||||
}
|
||||
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
|
||||
wgpu::DownlevelCapabilities {
|
||||
flags: wgpu::DownlevelFlags::empty(),
|
||||
shader_model: wgpu::ShaderModel::Sm5,
|
||||
..wgpu::DownlevelCapabilities::default()
|
||||
}
|
||||
}
|
||||
fn required_limits() -> wgpu::Limits {
|
||||
wgpu::Limits::downlevel_webgl2_defaults() // These downlevel limits will allow the code to run on all possible hardware
|
||||
}
|
||||
fn init(
|
||||
config: &wgpu::SurfaceConfiguration,
|
||||
adapter: &wgpu::Adapter,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
) -> Self;
|
||||
fn resize(
|
||||
&mut self,
|
||||
config: &wgpu::SurfaceConfiguration,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
);
|
||||
fn update(&mut self, event: WindowEvent);
|
||||
fn move_mouse(&mut self, delta: (f64,f64));
|
||||
fn render(
|
||||
&mut self,
|
||||
view: &wgpu::TextureView,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
spawner: &Spawner,
|
||||
);
|
||||
}
|
||||
|
||||
struct Setup {
|
||||
window: winit::window::Window,
|
||||
event_loop: EventLoop<()>,
|
||||
instance: wgpu::Instance,
|
||||
size: winit::dpi::PhysicalSize<u32>,
|
||||
surface: wgpu::Surface,
|
||||
adapter: wgpu::Adapter,
|
||||
device: wgpu::Device,
|
||||
queue: wgpu::Queue,
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
struct OffscreenCanvasSetup {
|
||||
offscreen_canvas: OffscreenCanvas,
|
||||
bitmap_renderer: ImageBitmapRenderingContext,
|
||||
}
|
||||
|
||||
async fn setup<E: Example>(title: &str) -> Setup {
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
{
|
||||
env_logger::init();
|
||||
};
|
||||
|
||||
let event_loop = EventLoop::new();
|
||||
let mut builder = winit::window::WindowBuilder::new();
|
||||
builder = builder.with_title(title);
|
||||
#[cfg(windows_OFF)] // TODO
|
||||
{
|
||||
use winit::platform::windows::WindowBuilderExtWindows;
|
||||
builder = builder.with_no_redirection_bitmap(true);
|
||||
}
|
||||
let window = builder.build(&event_loop).unwrap();
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
{
|
||||
use winit::platform::web::WindowExtWebSys;
|
||||
let query_string = web_sys::window().unwrap().location().search().unwrap();
|
||||
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
|
||||
.and_then(|x| x.parse().ok())
|
||||
.unwrap_or(log::Level::Error);
|
||||
console_log::init_with_level(level).expect("could not initialize logger");
|
||||
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
|
||||
// On wasm, append the canvas to the document body
|
||||
web_sys::window()
|
||||
.and_then(|win| win.document())
|
||||
.and_then(|doc| doc.body())
|
||||
.and_then(|body| {
|
||||
body.append_child(&web_sys::Element::from(window.canvas()))
|
||||
.ok()
|
||||
})
|
||||
.expect("couldn't append canvas to document body");
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
{
|
||||
use wasm_bindgen::JsCast;
|
||||
use winit::platform::web::WindowExtWebSys;
|
||||
|
||||
let query_string = web_sys::window().unwrap().location().search().unwrap();
|
||||
if let Some(offscreen_canvas_param) =
|
||||
parse_url_query_string(&query_string, "offscreen_canvas")
|
||||
{
|
||||
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
|
||||
log::info!("Creating OffscreenCanvasSetup");
|
||||
|
||||
let offscreen_canvas =
|
||||
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
|
||||
|
||||
let bitmap_renderer = window
|
||||
.canvas()
|
||||
.get_context("bitmaprenderer")
|
||||
.expect("couldn't create ImageBitmapRenderingContext (Result)")
|
||||
.expect("couldn't create ImageBitmapRenderingContext (Option)")
|
||||
.dyn_into::<ImageBitmapRenderingContext>()
|
||||
.expect("couldn't convert into ImageBitmapRenderingContext");
|
||||
|
||||
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
|
||||
offscreen_canvas,
|
||||
bitmap_renderer,
|
||||
})
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
log::info!("Initializing the surface...");
|
||||
|
||||
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
|
||||
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
|
||||
|
||||
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
|
||||
backends,
|
||||
dx12_shader_compiler,
|
||||
});
|
||||
let (size, surface) = unsafe {
|
||||
let size = window.inner_size();
|
||||
|
||||
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
|
||||
let surface = instance.create_surface(&window).unwrap();
|
||||
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
|
||||
let surface = {
|
||||
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
|
||||
log::info!("Creating surface from OffscreenCanvas");
|
||||
instance.create_surface_from_offscreen_canvas(
|
||||
offscreen_canvas_setup.offscreen_canvas.clone(),
|
||||
)
|
||||
} else {
|
||||
instance.create_surface(&window)
|
||||
}
|
||||
}
|
||||
.unwrap();
|
||||
|
||||
(size, surface)
|
||||
};
|
||||
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
|
||||
.await
|
||||
.expect("No suitable GPU adapters found on the system!");
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
{
|
||||
let adapter_info = adapter.get_info();
|
||||
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
|
||||
}
|
||||
|
||||
let optional_features = E::optional_features();
|
||||
let required_features = E::required_features();
|
||||
let adapter_features = adapter.features();
|
||||
assert!(
|
||||
adapter_features.contains(required_features),
|
||||
"Adapter does not support required features for this example: {:?}",
|
||||
required_features - adapter_features
|
||||
);
|
||||
|
||||
let required_downlevel_capabilities = E::required_downlevel_capabilities();
|
||||
let downlevel_capabilities = adapter.get_downlevel_capabilities();
|
||||
assert!(
|
||||
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
|
||||
"Adapter does not support the minimum shader model required to run this example: {:?}",
|
||||
required_downlevel_capabilities.shader_model
|
||||
);
|
||||
assert!(
|
||||
downlevel_capabilities
|
||||
.flags
|
||||
.contains(required_downlevel_capabilities.flags),
|
||||
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
|
||||
required_downlevel_capabilities.flags - downlevel_capabilities.flags
|
||||
);
|
||||
|
||||
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
|
||||
let needed_limits = E::required_limits().using_resolution(adapter.limits());
|
||||
|
||||
let trace_dir = std::env::var("WGPU_TRACE");
|
||||
let (device, queue) = adapter
|
||||
.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: None,
|
||||
features: (optional_features & adapter_features) | required_features,
|
||||
limits: needed_limits,
|
||||
},
|
||||
trace_dir.ok().as_ref().map(std::path::Path::new),
|
||||
)
|
||||
.await
|
||||
.expect("Unable to find a suitable GPU adapter!");
|
||||
|
||||
Setup {
|
||||
window,
|
||||
event_loop,
|
||||
instance,
|
||||
size,
|
||||
surface,
|
||||
adapter,
|
||||
device,
|
||||
queue,
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
offscreen_canvas_setup,
|
||||
}
|
||||
}
|
||||
|
||||
fn start<E: Example>(
|
||||
#[cfg(not(target_arch = "wasm32"))] Setup {
|
||||
window,
|
||||
event_loop,
|
||||
instance,
|
||||
size,
|
||||
surface,
|
||||
adapter,
|
||||
device,
|
||||
queue,
|
||||
}: Setup,
|
||||
#[cfg(target_arch = "wasm32")] Setup {
|
||||
window,
|
||||
event_loop,
|
||||
instance,
|
||||
size,
|
||||
surface,
|
||||
adapter,
|
||||
device,
|
||||
queue,
|
||||
offscreen_canvas_setup,
|
||||
}: Setup,
|
||||
) {
|
||||
let spawner = Spawner::new();
|
||||
let mut config = surface
|
||||
.get_default_config(&adapter, size.width, size.height)
|
||||
.expect("Surface isn't supported by the adapter.");
|
||||
let surface_view_format = config.format.add_srgb_suffix();
|
||||
config.view_formats.push(surface_view_format);
|
||||
surface.configure(&device, &config);
|
||||
|
||||
log::info!("Initializing the example...");
|
||||
let mut example = E::init(&config, &adapter, &device, &queue);
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
let mut last_frame_inst = Instant::now();
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
let (mut frame_count, mut accum_time) = (0, 0.0);
|
||||
|
||||
log::info!("Entering render loop...");
|
||||
event_loop.run(move |event, _, control_flow| {
|
||||
let _ = (&instance, &adapter); // force ownership by the closure
|
||||
*control_flow = if cfg!(feature = "metal-auto-capture") {
|
||||
ControlFlow::Exit
|
||||
} else {
|
||||
ControlFlow::Poll
|
||||
};
|
||||
match event {
|
||||
event::Event::RedrawEventsCleared => {
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
spawner.run_until_stalled();
|
||||
|
||||
window.request_redraw();
|
||||
}
|
||||
event::Event::WindowEvent {
|
||||
event:
|
||||
WindowEvent::Resized(size)
|
||||
| WindowEvent::ScaleFactorChanged {
|
||||
new_inner_size: &mut size,
|
||||
..
|
||||
},
|
||||
..
|
||||
} => {
|
||||
// Once winit is fixed, the detection conditions here can be removed.
|
||||
// https://github.com/rust-windowing/winit/issues/2876
|
||||
let max_dimension = adapter.limits().max_texture_dimension_2d;
|
||||
if size.width > max_dimension || size.height > max_dimension {
|
||||
log::warn!(
|
||||
"The resizing size {:?} exceeds the limit of {}.",
|
||||
size,
|
||||
max_dimension
|
||||
);
|
||||
} else {
|
||||
log::info!("Resizing to {:?}", size);
|
||||
config.width = size.width.max(1);
|
||||
config.height = size.height.max(1);
|
||||
example.resize(&config, &device, &queue);
|
||||
surface.configure(&device, &config);
|
||||
}
|
||||
}
|
||||
event::Event::WindowEvent { event, .. } => match event {
|
||||
WindowEvent::KeyboardInput {
|
||||
input:
|
||||
event::KeyboardInput {
|
||||
virtual_keycode: Some(event::VirtualKeyCode::Escape),
|
||||
state: event::ElementState::Pressed,
|
||||
..
|
||||
},
|
||||
..
|
||||
}
|
||||
| WindowEvent::CloseRequested => {
|
||||
*control_flow = ControlFlow::Exit;
|
||||
}
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
WindowEvent::KeyboardInput {
|
||||
input:
|
||||
event::KeyboardInput {
|
||||
virtual_keycode: Some(event::VirtualKeyCode::R),
|
||||
state: event::ElementState::Pressed,
|
||||
..
|
||||
},
|
||||
..
|
||||
} => {
|
||||
println!("{:#?}", instance.generate_report());
|
||||
}
|
||||
_ => {
|
||||
example.update(event);
|
||||
}
|
||||
},
|
||||
event::Event::DeviceEvent {
|
||||
event:
|
||||
winit::event::DeviceEvent::MouseMotion {
|
||||
delta,
|
||||
},
|
||||
..
|
||||
} => {
|
||||
example.move_mouse(delta);
|
||||
},
|
||||
event::Event::RedrawRequested(_) => {
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
{
|
||||
accum_time += last_frame_inst.elapsed().as_secs_f32();
|
||||
last_frame_inst = Instant::now();
|
||||
frame_count += 1;
|
||||
if frame_count == 100 {
|
||||
println!(
|
||||
"Avg frame time {}ms",
|
||||
accum_time * 1000.0 / frame_count as f32
|
||||
);
|
||||
accum_time = 0.0;
|
||||
frame_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
let frame = match surface.get_current_texture() {
|
||||
Ok(frame) => frame,
|
||||
Err(_) => {
|
||||
surface.configure(&device, &config);
|
||||
surface
|
||||
.get_current_texture()
|
||||
.expect("Failed to acquire next surface texture!")
|
||||
}
|
||||
};
|
||||
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
format: Some(surface_view_format),
|
||||
..wgpu::TextureViewDescriptor::default()
|
||||
});
|
||||
|
||||
example.render(&view, &device, &queue, &spawner);
|
||||
|
||||
frame.present();
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
{
|
||||
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
|
||||
let image_bitmap = offscreen_canvas_setup
|
||||
.offscreen_canvas
|
||||
.transfer_to_image_bitmap()
|
||||
.expect("couldn't transfer offscreen canvas to image bitmap.");
|
||||
offscreen_canvas_setup
|
||||
.bitmap_renderer
|
||||
.transfer_from_image_bitmap(&image_bitmap);
|
||||
|
||||
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub struct Spawner<'a> {
|
||||
executor: async_executor::LocalExecutor<'a>,
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
impl<'a> Spawner<'a> {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
executor: async_executor::LocalExecutor::new(),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
|
||||
self.executor.spawn(future).detach();
|
||||
}
|
||||
|
||||
fn run_until_stalled(&self) {
|
||||
while self.executor.try_tick() {}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
pub struct Spawner {}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
impl Spawner {
|
||||
fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
|
||||
wasm_bindgen_futures::spawn_local(future);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "wasm32"))]
|
||||
pub fn run<E: Example>(title: &str) {
|
||||
let setup = pollster::block_on(setup::<E>(title));
|
||||
start::<E>(setup);
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
pub fn run<E: Example>(title: &str) {
|
||||
use wasm_bindgen::prelude::*;
|
||||
|
||||
let title = title.to_owned();
|
||||
wasm_bindgen_futures::spawn_local(async move {
|
||||
let setup = setup::<E>(&title).await;
|
||||
let start_closure = Closure::once_into_js(move || start::<E>(setup));
|
||||
|
||||
// make sure to handle JS exceptions thrown inside start.
|
||||
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
|
||||
// This is required, because winit uses JS exception for control flow to escape from `run`.
|
||||
if let Err(error) = call_catch(&start_closure) {
|
||||
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
|
||||
e.message().includes("Using exceptions for control flow", 0)
|
||||
});
|
||||
|
||||
if !is_control_flow_exception {
|
||||
web_sys::console::error_1(&error);
|
||||
}
|
||||
}
|
||||
|
||||
#[wasm_bindgen]
|
||||
extern "C" {
|
||||
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
|
||||
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "wasm32")]
|
||||
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
|
||||
/// specific key out of it.
|
||||
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
|
||||
let query_string = query.strip_prefix('?')?;
|
||||
|
||||
for pair in query_string.split('&') {
|
||||
let mut pair = pair.split('=');
|
||||
let key = pair.next()?;
|
||||
let value = pair.next()?;
|
||||
|
||||
if key == search_key {
|
||||
return Some(value);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
// This allows treating the framework as a standalone example,
|
||||
// thus avoiding listing the example names in `Cargo.toml`.
|
||||
#[allow(dead_code)]
|
||||
fn main() {}
|
989
src/graphics.rs
989
src/graphics.rs
@ -1,989 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{HashSet,HashMap};
|
||||
use strafesnet_common::map;
|
||||
use strafesnet_common::integer;
|
||||
use strafesnet_common::model::{self, ColorId, NormalId, PolygonIter, PositionId, RenderConfigId, TextureCoordinateId, VertexId};
|
||||
use wgpu::{util::DeviceExt,AstcBlock,AstcChannel};
|
||||
use crate::model_graphics::{self,IndexedGraphicsMeshOwnedRenderConfig,IndexedGraphicsMeshOwnedRenderConfigId,GraphicsMeshOwnedRenderConfig,GraphicsModelColor4,GraphicsModelOwned,GraphicsVertex};
|
||||
|
||||
struct Indices{
|
||||
count:u32,
|
||||
buf:wgpu::Buffer,
|
||||
format:wgpu::IndexFormat,
|
||||
}
|
||||
impl Indices{
|
||||
fn new<T:bytemuck::Pod>(device:&wgpu::Device,indices:&Vec<T>,format:wgpu::IndexFormat)->Self{
|
||||
Self{
|
||||
buf:device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
|
||||
label:Some("Index"),
|
||||
contents:bytemuck::cast_slice(indices),
|
||||
usage:wgpu::BufferUsages::INDEX,
|
||||
}),
|
||||
count:indices.len() as u32,
|
||||
format,
|
||||
}
|
||||
}
|
||||
}
|
||||
struct GraphicsModel{
|
||||
indices:Indices,
|
||||
vertex_buf:wgpu::Buffer,
|
||||
bind_group:wgpu::BindGroup,
|
||||
instance_count:u32,
|
||||
}
|
||||
|
||||
struct GraphicsSamplers{
|
||||
repeat:wgpu::Sampler,
|
||||
}
|
||||
|
||||
struct GraphicsBindGroupLayouts{
|
||||
model:wgpu::BindGroupLayout,
|
||||
}
|
||||
|
||||
struct GraphicsBindGroups{
|
||||
camera:wgpu::BindGroup,
|
||||
skybox_texture:wgpu::BindGroup,
|
||||
}
|
||||
|
||||
struct GraphicsPipelines{
|
||||
skybox:wgpu::RenderPipeline,
|
||||
model:wgpu::RenderPipeline,
|
||||
}
|
||||
|
||||
struct GraphicsCamera{
|
||||
screen_size:glam::UVec2,
|
||||
fov:glam::Vec2,//slope
|
||||
//camera angles and such are extrapolated and passed in every time
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn perspective_rh(fov_x_slope:f32,fov_y_slope:f32,z_near:f32,z_far:f32)->glam::Mat4{
|
||||
//glam_assert!(z_near > 0.0 && z_far > 0.0);
|
||||
let r=z_far/(z_near-z_far);
|
||||
glam::Mat4::from_cols(
|
||||
glam::Vec4::new(1.0/fov_x_slope,0.0,0.0,0.0),
|
||||
glam::Vec4::new(0.0,1.0/fov_y_slope,0.0,0.0),
|
||||
glam::Vec4::new(0.0,0.0,r,-1.0),
|
||||
glam::Vec4::new(0.0,0.0,r*z_near,0.0),
|
||||
)
|
||||
}
|
||||
impl GraphicsCamera{
|
||||
pub fn proj(&self)->glam::Mat4{
|
||||
perspective_rh(self.fov.x,self.fov.y,0.4,4000.0)
|
||||
}
|
||||
pub fn world(&self,pos:glam::Vec3,angles:glam::Vec2)->glam::Mat4{
|
||||
//f32 good enough for view matrix
|
||||
glam::Mat4::from_translation(pos)*glam::Mat4::from_euler(glam::EulerRot::YXZ,angles.x,angles.y,0f32)
|
||||
}
|
||||
|
||||
pub fn to_uniform_data(&self,pos:glam::Vec3,angles:glam::Vec2)->[f32;16*4]{
|
||||
let proj=self.proj();
|
||||
let proj_inv=proj.inverse();
|
||||
let view_inv=self.world(pos,angles);
|
||||
let view=view_inv.inverse();
|
||||
|
||||
let mut raw=[0f32; 16 * 4];
|
||||
raw[..16].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj)[..]);
|
||||
raw[16..32].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj_inv)[..]);
|
||||
raw[32..48].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view)[..]);
|
||||
raw[48..64].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view_inv)[..]);
|
||||
raw
|
||||
}
|
||||
}
|
||||
impl std::default::Default for GraphicsCamera{
|
||||
fn default()->Self{
|
||||
Self{
|
||||
screen_size:glam::UVec2::ONE,
|
||||
fov:glam::Vec2::ONE,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FrameState{
|
||||
pub body:crate::physics::Body,
|
||||
pub camera:crate::physics::PhysicsCamera,
|
||||
pub time:integer::Time,
|
||||
}
|
||||
|
||||
pub struct GraphicsState{
|
||||
pipelines:GraphicsPipelines,
|
||||
bind_groups:GraphicsBindGroups,
|
||||
bind_group_layouts:GraphicsBindGroupLayouts,
|
||||
samplers:GraphicsSamplers,
|
||||
camera:GraphicsCamera,
|
||||
camera_buf:wgpu::Buffer,
|
||||
temp_squid_texture_view:wgpu::TextureView,
|
||||
models:Vec<GraphicsModel>,
|
||||
depth_view:wgpu::TextureView,
|
||||
staging_belt:wgpu::util::StagingBelt,
|
||||
}
|
||||
|
||||
impl GraphicsState{
|
||||
const DEPTH_FORMAT:wgpu::TextureFormat=wgpu::TextureFormat::Depth24Plus;
|
||||
fn create_depth_texture(
|
||||
config:&wgpu::SurfaceConfiguration,
|
||||
device:&wgpu::Device,
|
||||
)->wgpu::TextureView{
|
||||
let depth_texture=device.create_texture(&wgpu::TextureDescriptor{
|
||||
size:wgpu::Extent3d{
|
||||
width:config.width,
|
||||
height:config.height,
|
||||
depth_or_array_layers:1,
|
||||
},
|
||||
mip_level_count:1,
|
||||
sample_count:1,
|
||||
dimension:wgpu::TextureDimension::D2,
|
||||
format:Self::DEPTH_FORMAT,
|
||||
usage:wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
label:None,
|
||||
view_formats:&[],
|
||||
});
|
||||
|
||||
depth_texture.create_view(&wgpu::TextureViewDescriptor::default())
|
||||
}
|
||||
pub fn clear(&mut self){
|
||||
self.models.clear();
|
||||
}
|
||||
pub fn load_user_settings(&mut self,user_settings:&crate::settings::UserSettings){
|
||||
self.camera.fov=user_settings.calculate_fov(1.0,&self.camera.screen_size).as_vec2();
|
||||
}
|
||||
pub fn generate_models(&mut self,device:&wgpu::Device,queue:&wgpu::Queue,map:&map::CompleteMap){
|
||||
//generate texture view per texture
|
||||
let texture_views:HashMap<strafesnet_common::model::TextureId,wgpu::TextureView>=map.textures.iter().enumerate().filter_map(|(texture_id,texture_data)|{
|
||||
let texture_id=model::TextureId::new(texture_id as u32);
|
||||
let image=match ddsfile::Dds::read(std::io::Cursor::new(texture_data)){
|
||||
Ok(image)=>image,
|
||||
Err(e)=>{
|
||||
println!("Error loading texture: {e}");
|
||||
return None;
|
||||
},
|
||||
};
|
||||
|
||||
let (mut width,mut height)=(image.get_width(),image.get_height());
|
||||
|
||||
let format=match image.header10.unwrap().dxgi_format{
|
||||
ddsfile::DxgiFormat::R8G8B8A8_UNorm_sRGB=>wgpu::TextureFormat::Rgba8UnormSrgb,
|
||||
ddsfile::DxgiFormat::BC7_UNorm_sRGB =>{
|
||||
//floor(w,4),should be ceil(w,4)
|
||||
width=width/4*4;
|
||||
height=height/4*4;
|
||||
wgpu::TextureFormat::Bc7RgbaUnormSrgb
|
||||
},
|
||||
other=>{
|
||||
println!("unsupported texture format{:?}",other);
|
||||
return None;
|
||||
},
|
||||
};
|
||||
|
||||
let size=wgpu::Extent3d{
|
||||
width,
|
||||
height,
|
||||
depth_or_array_layers:1,
|
||||
};
|
||||
|
||||
let layer_size=wgpu::Extent3d{
|
||||
depth_or_array_layers:1,
|
||||
..size
|
||||
};
|
||||
let max_mips=layer_size.max_mips(wgpu::TextureDimension::D2);
|
||||
|
||||
let texture=device.create_texture_with_data(
|
||||
queue,
|
||||
&wgpu::TextureDescriptor{
|
||||
size,
|
||||
mip_level_count:max_mips,
|
||||
sample_count:1,
|
||||
dimension:wgpu::TextureDimension::D2,
|
||||
format,
|
||||
usage:wgpu::TextureUsages::TEXTURE_BINDING|wgpu::TextureUsages::COPY_DST,
|
||||
label:Some(format!("Texture{}",texture_id.get()).as_str()),
|
||||
view_formats:&[],
|
||||
},
|
||||
wgpu::util::TextureDataOrder::LayerMajor,
|
||||
&image.data,
|
||||
);
|
||||
Some((texture_id,texture.create_view(&wgpu::TextureViewDescriptor{
|
||||
label:Some(format!("Texture{} View",texture_id.get()).as_str()),
|
||||
dimension:Some(wgpu::TextureViewDimension::D2),
|
||||
..wgpu::TextureViewDescriptor::default()
|
||||
})))
|
||||
}).collect();
|
||||
let num_textures=texture_views.len();
|
||||
|
||||
//split groups with different textures into separate models
|
||||
//the models received here are supposed to be tightly packed,i.e. no code needs to check if two models are using the same groups.
|
||||
let indexed_models_len=map.models.len();
|
||||
//models split into graphics_group.RenderConfigId
|
||||
let mut owned_mesh_id_from_mesh_id_render_config_id:HashMap<model::MeshId,HashMap<RenderConfigId,IndexedGraphicsMeshOwnedRenderConfigId>>=HashMap::new();
|
||||
let mut unique_render_config_models:Vec<IndexedGraphicsMeshOwnedRenderConfig>=Vec::with_capacity(indexed_models_len);
|
||||
for model in &map.models{
|
||||
//wow
|
||||
let instance=GraphicsModelOwned{
|
||||
transform:model.transform.into(),
|
||||
normal_transform:glam::Mat3::from_cols_array_2d(&model.transform.matrix3.to_array().map(|row|row.map(Into::into))).inverse().transpose(),
|
||||
color:GraphicsModelColor4::new(model.color),
|
||||
};
|
||||
//get or create owned mesh map
|
||||
let owned_mesh_map=owned_mesh_id_from_mesh_id_render_config_id
|
||||
.entry(model.mesh).or_insert_with(||{
|
||||
let mut owned_mesh_map=HashMap::new();
|
||||
//add mesh if renderid never before seen for this model
|
||||
//add instance
|
||||
//convert Model into GraphicsModelOwned
|
||||
//check each group, if it's using a new render config then make a new clone of the model
|
||||
if let Some(mesh)=map.meshes.get(model.mesh.get() as usize){
|
||||
for graphics_group in mesh.graphics_groups.iter(){
|
||||
//get or create owned mesh
|
||||
let owned_mesh_id=owned_mesh_map
|
||||
.entry(graphics_group.render).or_insert_with(||{
|
||||
//create
|
||||
let owned_mesh_id=IndexedGraphicsMeshOwnedRenderConfigId::new(unique_render_config_models.len() as u32);
|
||||
unique_render_config_models.push(IndexedGraphicsMeshOwnedRenderConfig{
|
||||
unique_pos:mesh.unique_pos.iter().map(|v|v.to_array().map(Into::into)).collect(),
|
||||
unique_tex:mesh.unique_tex.iter().map(|v|*v.as_ref()).collect(),
|
||||
unique_normal:mesh.unique_normal.iter().map(|v|v.to_array().map(Into::into)).collect(),
|
||||
unique_color:mesh.unique_color.iter().map(|v|*v.as_ref()).collect(),
|
||||
unique_vertices:mesh.unique_vertices.clone(),
|
||||
render_config:graphics_group.render,
|
||||
polys:model::PolygonGroup::PolygonList(model::PolygonList::new(Vec::new())),
|
||||
instances:Vec::new(),
|
||||
});
|
||||
owned_mesh_id
|
||||
});
|
||||
let owned_mesh=unique_render_config_models.get_mut(owned_mesh_id.get() as usize).unwrap();
|
||||
match &mut owned_mesh.polys{
|
||||
model::PolygonGroup::PolygonList(polygon_list)=>polygon_list.extend(
|
||||
graphics_group.groups.iter().flat_map(|polygon_group_id|{
|
||||
mesh.polygon_groups[polygon_group_id.get() as usize].polys()
|
||||
})
|
||||
.map(|vertex_id_slice|
|
||||
vertex_id_slice.to_vec()
|
||||
)
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
owned_mesh_map
|
||||
});
|
||||
for owned_mesh_id in owned_mesh_map.values(){
|
||||
let owned_mesh=unique_render_config_models.get_mut(owned_mesh_id.get() as usize).unwrap();
|
||||
let render_config=&map.render_configs[owned_mesh.render_config.get() as usize];
|
||||
if model.color.w==0.0&&render_config.texture.is_none(){
|
||||
continue;
|
||||
}
|
||||
owned_mesh.instances.push(instance.clone());
|
||||
}
|
||||
}
|
||||
//check every model to see if it's using the same (texture,color) but has few instances,if it is combine it into one model
|
||||
//1. collect unique instances of texture and color,note model id
|
||||
//2. for each model id,check if removing it from the pool decreases both the model count and instance count by more than one
|
||||
//3. transpose all models that stay in the set
|
||||
|
||||
//best plan:benchmark set_bind_group,set_vertex_buffer,set_index_buffer and draw_indexed
|
||||
//check if the estimated render performance is better by transposing multiple model instances into one model instance
|
||||
|
||||
//for now:just deduplicate single models...
|
||||
let mut deduplicated_models=Vec::with_capacity(indexed_models_len);//use indexed_models_len because the list will likely get smaller instead of bigger
|
||||
let mut unique_texture_color=HashMap::new();//texture->color->vec![(model_id,instance_id)]
|
||||
for (model_id,model) in unique_render_config_models.iter().enumerate(){
|
||||
//for now:filter out models with more than one instance
|
||||
if 1<model.instances.len(){
|
||||
continue;
|
||||
}
|
||||
//populate hashmap
|
||||
let unique_color=unique_texture_color
|
||||
.entry(model.render_config)
|
||||
.or_insert_with(||HashMap::new());
|
||||
//separate instances by color
|
||||
for (instance_id,instance) in model.instances.iter().enumerate(){
|
||||
let model_instance_list=unique_color
|
||||
.entry(instance.color)
|
||||
.or_insert_with(||Vec::new());
|
||||
//add model instance to list
|
||||
model_instance_list.push((model_id,instance_id));
|
||||
}
|
||||
}
|
||||
//populate a hashset of models selected for transposition
|
||||
//construct transposed models
|
||||
let mut selected_model_instances=HashSet::new();
|
||||
for (render_config,unique_color) in unique_texture_color.into_iter(){
|
||||
for (color,model_instance_list) in unique_color.into_iter(){
|
||||
//world transforming one model does not meet the definition of deduplicaiton
|
||||
if 1<model_instance_list.len(){
|
||||
//create model
|
||||
let mut unique_pos=Vec::new();
|
||||
let mut pos_id_from=HashMap::new();
|
||||
let mut unique_tex=Vec::new();
|
||||
let mut tex_id_from=HashMap::new();
|
||||
let mut unique_normal=Vec::new();
|
||||
let mut normal_id_from=HashMap::new();
|
||||
let mut unique_color=Vec::new();
|
||||
let mut color_id_from=HashMap::new();
|
||||
let mut unique_vertices=Vec::new();
|
||||
let mut vertex_id_from=HashMap::new();
|
||||
|
||||
let mut polys=Vec::new();
|
||||
//transform instance vertices
|
||||
for (model_id,instance_id) in model_instance_list.into_iter(){
|
||||
//populate hashset to prevent these models from being copied
|
||||
selected_model_instances.insert(model_id);
|
||||
//there is only one instance per model
|
||||
let model=&unique_render_config_models[model_id];
|
||||
let instance=&model.instances[instance_id];
|
||||
//just hash word slices LOL
|
||||
let map_pos_id:Vec<PositionId>=model.unique_pos.iter().map(|untransformed_pos|{
|
||||
let pos=instance.transform.transform_point3(glam::Vec3::from_array(untransformed_pos.clone())).to_array();
|
||||
let h=bytemuck::cast::<[f32;3],[u32;3]>(pos);
|
||||
PositionId::new(*pos_id_from.entry(h).or_insert_with(||{
|
||||
let pos_id=unique_pos.len();
|
||||
unique_pos.push(pos);
|
||||
pos_id
|
||||
}) as u32)
|
||||
}).collect();
|
||||
let map_tex_id:Vec<TextureCoordinateId>=model.unique_tex.iter().map(|&tex|{
|
||||
let h=bytemuck::cast::<[f32;2],[u32;2]>(tex);
|
||||
TextureCoordinateId::new(*tex_id_from.entry(h).or_insert_with(||{
|
||||
let tex_id=unique_tex.len();
|
||||
unique_tex.push(tex);
|
||||
tex_id
|
||||
}) as u32)
|
||||
}).collect();
|
||||
let map_normal_id:Vec<NormalId>=model.unique_normal.iter().map(|untransformed_normal|{
|
||||
let normal=(instance.normal_transform*glam::Vec3::from_array(untransformed_normal.clone())).to_array();
|
||||
let h=bytemuck::cast::<[f32;3],[u32;3]>(normal);
|
||||
NormalId::new(*normal_id_from.entry(h).or_insert_with(||{
|
||||
let normal_id=unique_normal.len();
|
||||
unique_normal.push(normal);
|
||||
normal_id
|
||||
}) as u32)
|
||||
}).collect();
|
||||
let map_color_id:Vec<ColorId>=model.unique_color.iter().map(|&color|{
|
||||
let h=bytemuck::cast::<[f32;4],[u32;4]>(color);
|
||||
ColorId::new(*color_id_from.entry(h).or_insert_with(||{
|
||||
let color_id=unique_color.len();
|
||||
unique_color.push(color);
|
||||
color_id
|
||||
}) as u32)
|
||||
}).collect();
|
||||
//map the indexed vertices onto new indices
|
||||
//creating the vertex map is slightly different because the vertices are directly hashable
|
||||
let map_vertex_id:Vec<VertexId>=model.unique_vertices.iter().map(|unmapped_vertex|{
|
||||
let vertex=model::IndexedVertex{
|
||||
pos:map_pos_id[unmapped_vertex.pos.get() as usize],
|
||||
tex:map_tex_id[unmapped_vertex.tex.get() as usize],
|
||||
normal:map_normal_id[unmapped_vertex.normal.get() as usize],
|
||||
color:map_color_id[unmapped_vertex.color.get() as usize],
|
||||
};
|
||||
VertexId::new(*vertex_id_from.entry(vertex.clone()).or_insert_with(||{
|
||||
let vertex_id=unique_vertices.len();
|
||||
unique_vertices.push(vertex);
|
||||
vertex_id
|
||||
}) as u32)
|
||||
}).collect();
|
||||
polys.extend(model.polys.polys().map(|poly|
|
||||
poly.iter().map(|vertex_id|
|
||||
map_vertex_id[vertex_id.get() as usize]
|
||||
).collect()
|
||||
));
|
||||
}
|
||||
//push model into dedup
|
||||
deduplicated_models.push(IndexedGraphicsMeshOwnedRenderConfig{
|
||||
unique_pos,
|
||||
unique_tex,
|
||||
unique_normal,
|
||||
unique_color,
|
||||
unique_vertices,
|
||||
render_config,
|
||||
polys:model::PolygonGroup::PolygonList(model::PolygonList::new(polys)),
|
||||
instances:vec![GraphicsModelOwned{
|
||||
transform:glam::Mat4::IDENTITY,
|
||||
normal_transform:glam::Mat3::IDENTITY,
|
||||
color
|
||||
}],
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
//fill untouched models
|
||||
for (model_id,model) in unique_render_config_models.into_iter().enumerate(){
|
||||
if !selected_model_instances.contains(&model_id){
|
||||
deduplicated_models.push(model);
|
||||
}
|
||||
}
|
||||
|
||||
//de-index models
|
||||
let deduplicated_models_len=deduplicated_models.len();
|
||||
let models:Vec<GraphicsMeshOwnedRenderConfig>=deduplicated_models.into_iter().map(|model|{
|
||||
let mut vertices=Vec::new();
|
||||
let mut index_from_vertex=HashMap::new();//::<IndexedVertex,usize>
|
||||
//this mut be combined in a more complex way if the models use different render patterns per group
|
||||
let mut indices=Vec::new();
|
||||
for poly in model.polys.polys(){
|
||||
let mut poly_vertices=poly.iter()
|
||||
.map(|&vertex_index|*index_from_vertex.entry(vertex_index).or_insert_with(||{
|
||||
let i=vertices.len();
|
||||
let vertex=&model.unique_vertices[vertex_index.get() as usize];
|
||||
vertices.push(GraphicsVertex{
|
||||
pos:model.unique_pos[vertex.pos.get() as usize],
|
||||
tex:model.unique_tex[vertex.tex.get() as usize],
|
||||
normal:model.unique_normal[vertex.normal.get() as usize],
|
||||
color:model.unique_color[vertex.color.get() as usize],
|
||||
});
|
||||
i
|
||||
}));
|
||||
|
||||
let a=poly_vertices.next().unwrap();
|
||||
let mut b=poly_vertices.next().unwrap();
|
||||
|
||||
poly_vertices.for_each(|c|{
|
||||
indices.extend([a,b,c]);
|
||||
b=c;
|
||||
});
|
||||
}
|
||||
GraphicsMeshOwnedRenderConfig{
|
||||
instances:model.instances,
|
||||
indices:if (u32::MAX as usize)<vertices.len(){
|
||||
panic!("Model has too many vertices!")
|
||||
}else if (u16::MAX as usize)<vertices.len(){
|
||||
model_graphics::Indices::U32(indices.into_iter().map(|vertex_idx|vertex_idx as u32).collect())
|
||||
}else{
|
||||
model_graphics::Indices::U16(indices.into_iter().map(|vertex_idx|vertex_idx as u16).collect())
|
||||
},
|
||||
vertices,
|
||||
render_config:model.render_config,
|
||||
}
|
||||
}).collect();
|
||||
//.into_iter() the modeldata vec so entities can be /moved/ to models.entities
|
||||
let mut model_count=0;
|
||||
let mut instance_count=0;
|
||||
let uniform_buffer_binding_size=crate::setup::required_limits().max_uniform_buffer_binding_size as usize;
|
||||
let chunk_size=uniform_buffer_binding_size/MODEL_BUFFER_SIZE_BYTES;
|
||||
self.models.reserve(models.len());
|
||||
for model in models.into_iter(){
|
||||
instance_count+=model.instances.len();
|
||||
for instances_chunk in model.instances.rchunks(chunk_size){
|
||||
model_count+=1;
|
||||
let mut model_uniforms=get_instances_buffer_data(instances_chunk);
|
||||
//TEMP: fill with zeroes to pass validation
|
||||
model_uniforms.resize(MODEL_BUFFER_SIZE*512,0.0f32);
|
||||
let model_buf=device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
|
||||
label:Some(format!("Model{} Buf",model_count).as_str()),
|
||||
contents:bytemuck::cast_slice(&model_uniforms),
|
||||
usage:wgpu::BufferUsages::UNIFORM|wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
let render_config=&map.render_configs[model.render_config.get() as usize];
|
||||
let texture_view=render_config.texture.and_then(|texture_id|
|
||||
texture_views.get(&texture_id)
|
||||
).unwrap_or(&self.temp_squid_texture_view);
|
||||
let bind_group=device.create_bind_group(&wgpu::BindGroupDescriptor{
|
||||
layout:&self.bind_group_layouts.model,
|
||||
entries:&[
|
||||
wgpu::BindGroupEntry{
|
||||
binding:0,
|
||||
resource:model_buf.as_entire_binding(),
|
||||
},
|
||||
wgpu::BindGroupEntry{
|
||||
binding:1,
|
||||
resource:wgpu::BindingResource::TextureView(texture_view),
|
||||
},
|
||||
wgpu::BindGroupEntry{
|
||||
binding:2,
|
||||
resource:wgpu::BindingResource::Sampler(&self.samplers.repeat),
|
||||
},
|
||||
],
|
||||
label:Some(format!("Model{} Bind Group",model_count).as_str()),
|
||||
});
|
||||
let vertex_buf=device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
|
||||
label:Some("Vertex"),
|
||||
contents:bytemuck::cast_slice(&model.vertices),
|
||||
usage:wgpu::BufferUsages::VERTEX,
|
||||
});
|
||||
//all of these are being moved here
|
||||
self.models.push(GraphicsModel{
|
||||
instance_count:instances_chunk.len() as u32,
|
||||
vertex_buf,
|
||||
indices:match &model.indices{
|
||||
model_graphics::Indices::U32(indices)=>Indices::new(device,indices,wgpu::IndexFormat::Uint32),
|
||||
model_graphics::Indices::U16(indices)=>Indices::new(device,indices,wgpu::IndexFormat::Uint16),
|
||||
},
|
||||
bind_group,
|
||||
});
|
||||
}
|
||||
}
|
||||
println!("Texture References={}",num_textures);
|
||||
println!("Textures Loaded={}",texture_views.len());
|
||||
println!("Indexed Models={}",indexed_models_len);
|
||||
println!("Deduplicated Models={}",deduplicated_models_len);
|
||||
println!("Graphics Objects:{}",self.models.len());
|
||||
println!("Graphics Instances:{}",instance_count);
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
device:&wgpu::Device,
|
||||
queue:&wgpu::Queue,
|
||||
config:&wgpu::SurfaceConfiguration,
|
||||
)->Self{
|
||||
let camera_bind_group_layout=device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor{
|
||||
label:None,
|
||||
entries:&[
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:0,
|
||||
visibility:wgpu::ShaderStages::VERTEX,
|
||||
ty:wgpu::BindingType::Buffer{
|
||||
ty:wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset:false,
|
||||
min_binding_size:None,
|
||||
},
|
||||
count:None,
|
||||
},
|
||||
],
|
||||
});
|
||||
let skybox_texture_bind_group_layout=device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor{
|
||||
label:Some("Skybox Texture Bind Group Layout"),
|
||||
entries:&[
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:0,
|
||||
visibility:wgpu::ShaderStages::FRAGMENT,
|
||||
ty:wgpu::BindingType::Texture{
|
||||
sample_type:wgpu::TextureSampleType::Float{filterable:true},
|
||||
multisampled:false,
|
||||
view_dimension:wgpu::TextureViewDimension::Cube,
|
||||
},
|
||||
count:None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:1,
|
||||
visibility:wgpu::ShaderStages::FRAGMENT,
|
||||
ty:wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count:None,
|
||||
},
|
||||
],
|
||||
});
|
||||
let model_bind_group_layout=device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor{
|
||||
label:Some("Model Bind Group Layout"),
|
||||
entries:&[
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:0,
|
||||
visibility:wgpu::ShaderStages::VERTEX,
|
||||
ty:wgpu::BindingType::Buffer{
|
||||
ty:wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset:false,
|
||||
min_binding_size:None,
|
||||
},
|
||||
count:None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:1,
|
||||
visibility:wgpu::ShaderStages::FRAGMENT,
|
||||
ty:wgpu::BindingType::Texture{
|
||||
sample_type:wgpu::TextureSampleType::Float{filterable:true},
|
||||
multisampled:false,
|
||||
view_dimension:wgpu::TextureViewDimension::D2,
|
||||
},
|
||||
count:None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
binding:2,
|
||||
visibility:wgpu::ShaderStages::FRAGMENT,
|
||||
ty:wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count:None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let clamp_sampler=device.create_sampler(&wgpu::SamplerDescriptor{
|
||||
label:Some("Clamp Sampler"),
|
||||
address_mode_u:wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v:wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w:wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter:wgpu::FilterMode::Linear,
|
||||
min_filter:wgpu::FilterMode::Linear,
|
||||
mipmap_filter:wgpu::FilterMode::Linear,
|
||||
..Default::default()
|
||||
});
|
||||
let repeat_sampler=device.create_sampler(&wgpu::SamplerDescriptor{
|
||||
label:Some("Repeat Sampler"),
|
||||
address_mode_u:wgpu::AddressMode::Repeat,
|
||||
address_mode_v:wgpu::AddressMode::Repeat,
|
||||
address_mode_w:wgpu::AddressMode::Repeat,
|
||||
mag_filter:wgpu::FilterMode::Linear,
|
||||
min_filter:wgpu::FilterMode::Linear,
|
||||
mipmap_filter:wgpu::FilterMode::Linear,
|
||||
anisotropy_clamp:16,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
// Create the render pipeline
|
||||
let shader=device.create_shader_module(wgpu::ShaderModuleDescriptor{
|
||||
label:None,
|
||||
source:wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
|
||||
});
|
||||
|
||||
//load textures
|
||||
let device_features=device.features();
|
||||
|
||||
let skybox_texture_view={
|
||||
let skybox_format=if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC){
|
||||
println!("Using ASTC");
|
||||
wgpu::TextureFormat::Astc{
|
||||
block:AstcBlock::B4x4,
|
||||
channel:AstcChannel::UnormSrgb,
|
||||
}
|
||||
}else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2){
|
||||
println!("Using ETC2");
|
||||
wgpu::TextureFormat::Etc2Rgb8UnormSrgb
|
||||
}else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC){
|
||||
println!("Using BC");
|
||||
wgpu::TextureFormat::Bc1RgbaUnormSrgb
|
||||
}else{
|
||||
println!("Using plain");
|
||||
wgpu::TextureFormat::Bgra8UnormSrgb
|
||||
};
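//Each branch above picks a compressed format the device supports; the match below then selects the skybox .dds asset that was pre-encoded in that same format.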
|
||||
|
||||
let bytes=match skybox_format{
|
||||
wgpu::TextureFormat::Astc{
|
||||
block:AstcBlock::B4x4,
|
||||
channel:AstcChannel::UnormSrgb,
|
||||
}=>&include_bytes!("../images/astc.dds")[..],
|
||||
wgpu::TextureFormat::Etc2Rgb8UnormSrgb=>&include_bytes!("../images/etc2.dds")[..],
|
||||
wgpu::TextureFormat::Bc1RgbaUnormSrgb=>&include_bytes!("../images/bc1.dds")[..],
|
||||
wgpu::TextureFormat::Bgra8UnormSrgb=>&include_bytes!("../images/bgra.dds")[..],
|
||||
_=>unreachable!(),
|
||||
};
|
||||
|
||||
let skybox_image=ddsfile::Dds::read(&mut std::io::Cursor::new(bytes)).unwrap();
|
||||
|
||||
let size=wgpu::Extent3d{
|
||||
width:skybox_image.get_width(),
|
||||
height:skybox_image.get_height(),
|
||||
depth_or_array_layers:6,
|
||||
};
|
||||
|
||||
let layer_size=wgpu::Extent3d{
|
||||
depth_or_array_layers:1,
|
||||
..size
|
||||
};
|
||||
let max_mips=layer_size.max_mips(wgpu::TextureDimension::D2);
|
||||
|
||||
let skybox_texture=device.create_texture_with_data(
|
||||
queue,
|
||||
&wgpu::TextureDescriptor{
|
||||
size,
|
||||
mip_level_count:max_mips,
|
||||
sample_count:1,
|
||||
dimension:wgpu::TextureDimension::D2,
|
||||
format:skybox_format,
|
||||
usage:wgpu::TextureUsages::TEXTURE_BINDING|wgpu::TextureUsages::COPY_DST,
|
||||
label:Some("Skybox Texture"),
|
||||
view_formats:&[],
|
||||
},
|
||||
wgpu::util::TextureDataOrder::LayerMajor,
|
||||
&skybox_image.data,
|
||||
);
|
||||
|
||||
skybox_texture.create_view(&wgpu::TextureViewDescriptor{
|
||||
label:Some("Skybox Texture View"),
|
||||
dimension:Some(wgpu::TextureViewDimension::Cube),
|
||||
..wgpu::TextureViewDescriptor::default()
|
||||
})
|
||||
};
|
||||
|
||||
//squid
|
||||
let squid_texture_view={
|
||||
let bytes=include_bytes!("../images/squid.dds");
|
||||
|
||||
let image=ddsfile::Dds::read(&mut std::io::Cursor::new(bytes)).unwrap();
|
||||
|
||||
let size=wgpu::Extent3d{
|
||||
width:image.get_width(),
|
||||
height:image.get_height(),
|
||||
depth_or_array_layers:1,
|
||||
};
|
||||
|
||||
let layer_size=wgpu::Extent3d{
|
||||
depth_or_array_layers:1,
|
||||
..size
|
||||
};
|
||||
let max_mips=layer_size.max_mips(wgpu::TextureDimension::D2);
|
||||
|
||||
let texture=device.create_texture_with_data(
|
||||
queue,
|
||||
&wgpu::TextureDescriptor{
|
||||
size,
|
||||
mip_level_count:max_mips,
|
||||
sample_count:1,
|
||||
dimension:wgpu::TextureDimension::D2,
|
||||
format:wgpu::TextureFormat::Bc7RgbaUnorm,
|
||||
usage:wgpu::TextureUsages::TEXTURE_BINDING|wgpu::TextureUsages::COPY_DST,
|
||||
label:Some("Squid Texture"),
|
||||
view_formats:&[],
|
||||
},
|
||||
wgpu::util::TextureDataOrder::LayerMajor,
|
||||
&image.data,
|
||||
);
|
||||
|
||||
texture.create_view(&wgpu::TextureViewDescriptor{
|
||||
label:Some("Squid Texture View"),
|
||||
dimension:Some(wgpu::TextureViewDimension::D2),
|
||||
..wgpu::TextureViewDescriptor::default()
|
||||
})
|
||||
};
|
||||
|
||||
let model_pipeline_layout=device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor{
|
||||
label:None,
|
||||
bind_group_layouts:&[
|
||||
&camera_bind_group_layout,
|
||||
&skybox_texture_bind_group_layout,
|
||||
&model_bind_group_layout,
|
||||
],
|
||||
push_constant_ranges:&[],
|
||||
});
|
||||
let sky_pipeline_layout=device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor{
|
||||
label:None,
|
||||
bind_group_layouts:&[
|
||||
&camera_bind_group_layout,
|
||||
&skybox_texture_bind_group_layout,
|
||||
],
|
||||
push_constant_ranges:&[],
|
||||
});
|
||||
|
||||
// Create the render pipelines
|
||||
let sky_pipeline=device.create_render_pipeline(&wgpu::RenderPipelineDescriptor{
|
||||
label:Some("Sky Pipeline"),
|
||||
layout:Some(&sky_pipeline_layout),
|
||||
vertex:wgpu::VertexState{
|
||||
module:&shader,
|
||||
entry_point:"vs_sky",
|
||||
buffers:&[],
|
||||
compilation_options:wgpu::PipelineCompilationOptions::default(),
|
||||
},
|
||||
fragment:Some(wgpu::FragmentState{
|
||||
module:&shader,
|
||||
entry_point:"fs_sky",
|
||||
targets:&[Some(config.view_formats[0].into())],
|
||||
compilation_options:wgpu::PipelineCompilationOptions::default(),
|
||||
}),
|
||||
primitive:wgpu::PrimitiveState{
|
||||
front_face:wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil:Some(wgpu::DepthStencilState{
|
||||
format:Self::DEPTH_FORMAT,
|
||||
depth_write_enabled:false,
|
||||
depth_compare:wgpu::CompareFunction::LessEqual,
|
||||
stencil:wgpu::StencilState::default(),
|
||||
bias:wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample:wgpu::MultisampleState::default(),
|
||||
multiview:None,
|
||||
cache:None,
|
||||
});
|
||||
let model_pipeline=device.create_render_pipeline(&wgpu::RenderPipelineDescriptor{
|
||||
label:Some("Model Pipeline"),
|
||||
layout:Some(&model_pipeline_layout),
|
||||
vertex:wgpu::VertexState{
|
||||
module:&shader,
|
||||
entry_point:"vs_entity_texture",
|
||||
buffers:&[wgpu::VertexBufferLayout{
|
||||
array_stride:std::mem::size_of::<GraphicsVertex>() as wgpu::BufferAddress,
|
||||
step_mode:wgpu::VertexStepMode::Vertex,
|
||||
attributes:&wgpu::vertex_attr_array![0=>Float32x3,1=>Float32x2,2=>Float32x3,3=>Float32x4],
|
||||
}],
|
||||
compilation_options:wgpu::PipelineCompilationOptions::default(),
|
||||
},
|
||||
fragment:Some(wgpu::FragmentState{
|
||||
module:&shader,
|
||||
entry_point:"fs_entity_texture",
|
||||
targets:&[Some(config.view_formats[0].into())],
|
||||
compilation_options:wgpu::PipelineCompilationOptions::default(),
|
||||
}),
|
||||
primitive:wgpu::PrimitiveState{
|
||||
front_face:wgpu::FrontFace::Cw,
|
||||
cull_mode:Some(wgpu::Face::Front),
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil:Some(wgpu::DepthStencilState{
|
||||
format:Self::DEPTH_FORMAT,
|
||||
depth_write_enabled:true,
|
||||
depth_compare:wgpu::CompareFunction::LessEqual,
|
||||
stencil:wgpu::StencilState::default(),
|
||||
bias:wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample:wgpu::MultisampleState::default(),
|
||||
multiview:None,
|
||||
cache:None,
|
||||
});
|
||||
|
||||
let camera=GraphicsCamera::default();
|
||||
let camera_uniforms=camera.to_uniform_data(glam::Vec3::ZERO,glam::Vec2::ZERO);
|
||||
let camera_buf=device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
|
||||
label:Some("Camera"),
|
||||
contents:bytemuck::cast_slice(&camera_uniforms),
|
||||
usage:wgpu::BufferUsages::UNIFORM|wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
let camera_bind_group=device.create_bind_group(&wgpu::BindGroupDescriptor{
|
||||
layout:&camera_bind_group_layout,
|
||||
entries:&[
|
||||
wgpu::BindGroupEntry{
|
||||
binding:0,
|
||||
resource:camera_buf.as_entire_binding(),
|
||||
},
|
||||
],
|
||||
label:Some("Camera"),
|
||||
});
|
||||
|
||||
let skybox_texture_bind_group=device.create_bind_group(&wgpu::BindGroupDescriptor{
|
||||
layout:&skybox_texture_bind_group_layout,
|
||||
entries:&[
|
||||
wgpu::BindGroupEntry{
|
||||
binding:0,
|
||||
resource:wgpu::BindingResource::TextureView(&skybox_texture_view),
|
||||
},
|
||||
wgpu::BindGroupEntry{
|
||||
binding:1,
|
||||
resource:wgpu::BindingResource::Sampler(&clamp_sampler),
|
||||
},
|
||||
],
|
||||
label:Some("Sky Texture"),
|
||||
});
|
||||
|
||||
let depth_view=Self::create_depth_texture(config,device);
|
||||
|
||||
Self{
|
||||
pipelines:GraphicsPipelines{
|
||||
skybox:sky_pipeline,
|
||||
model:model_pipeline
|
||||
},
|
||||
bind_groups:GraphicsBindGroups{
|
||||
camera:camera_bind_group,
|
||||
skybox_texture:skybox_texture_bind_group,
|
||||
},
|
||||
camera,
|
||||
camera_buf,
|
||||
models:Vec::new(),
|
||||
depth_view,
|
||||
staging_belt:wgpu::util::StagingBelt::new(0x100),
|
||||
bind_group_layouts:GraphicsBindGroupLayouts{model:model_bind_group_layout},
|
||||
samplers:GraphicsSamplers{repeat:repeat_sampler},
|
||||
temp_squid_texture_view:squid_texture_view,
|
||||
}
|
||||
}
|
||||
pub fn resize(
|
||||
&mut self,
|
||||
device:&wgpu::Device,
|
||||
config:&wgpu::SurfaceConfiguration,
|
||||
user_settings:&crate::settings::UserSettings,
|
||||
){
|
||||
self.depth_view=Self::create_depth_texture(config,device);
|
||||
self.camera.screen_size=glam::uvec2(config.width,config.height);
|
||||
self.load_user_settings(user_settings);
|
||||
}
|
||||
pub fn render(
|
||||
&mut self,
|
||||
view:&wgpu::TextureView,
|
||||
device:&wgpu::Device,
|
||||
queue:&wgpu::Queue,
|
||||
frame_state:FrameState,
|
||||
){
|
||||
//TODO:use scheduled frame times to create beautiful smoothing simulation physics extrapolation assuming no input
|
||||
|
||||
let mut encoder=device.create_command_encoder(&wgpu::CommandEncoderDescriptor{label:None});
|
||||
|
||||
// update rotation
|
||||
let camera_uniforms=self.camera.to_uniform_data(
|
||||
frame_state.body.extrapolated_position(frame_state.time).map(Into::<f32>::into).to_array().into(),
|
||||
frame_state.camera.simulate_move_angles(glam::IVec2::ZERO)
|
||||
);
|
||||
self.staging_belt
|
||||
.write_buffer(
|
||||
&mut encoder,
|
||||
&self.camera_buf,
|
||||
0,
|
||||
wgpu::BufferSize::new((camera_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
|
||||
device,
|
||||
)
|
||||
.copy_from_slice(bytemuck::cast_slice(&camera_uniforms));
|
||||
//This code only needs to run when the uniforms change
|
||||
/*
|
||||
for model in self.models.iter(){
|
||||
let model_uniforms=get_instances_buffer_data(&model.instances);
|
||||
self.staging_belt
|
||||
.write_buffer(
|
||||
&mut encoder,
|
||||
&model.model_buf,//description of where data will be written when command is executed
|
||||
0,//offset in staging belt?
|
||||
wgpu::BufferSize::new((model_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
|
||||
device,
|
||||
)
|
||||
.copy_from_slice(bytemuck::cast_slice(&model_uniforms));
|
||||
}
|
||||
*/
|
||||
self.staging_belt.finish();
|
||||
|
||||
{
|
||||
let mut rpass=encoder.begin_render_pass(&wgpu::RenderPassDescriptor{
|
||||
label:None,
|
||||
color_attachments:&[Some(wgpu::RenderPassColorAttachment{
|
||||
view,
|
||||
resolve_target:None,
|
||||
ops:wgpu::Operations{
|
||||
load:wgpu::LoadOp::Clear(wgpu::Color{
|
||||
r:0.1,
|
||||
g:0.2,
|
||||
b:0.3,
|
||||
a:1.0,
|
||||
}),
|
||||
store:wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment:Some(wgpu::RenderPassDepthStencilAttachment{
|
||||
view:&self.depth_view,
|
||||
depth_ops:Some(wgpu::Operations{
|
||||
load:wgpu::LoadOp::Clear(1.0),
|
||||
store:wgpu::StoreOp::Discard,
|
||||
}),
|
||||
stencil_ops:None,
|
||||
}),
|
||||
timestamp_writes:Default::default(),
|
||||
occlusion_query_set:Default::default(),
|
||||
});
|
||||
|
||||
rpass.set_bind_group(0,&self.bind_groups.camera,&[]);
|
||||
rpass.set_bind_group(1,&self.bind_groups.skybox_texture,&[]);
|
||||
|
||||
rpass.set_pipeline(&self.pipelines.model);
|
||||
for model in &self.models{
|
||||
rpass.set_bind_group(2,&model.bind_group,&[]);
|
||||
rpass.set_vertex_buffer(0,model.vertex_buf.slice(..));
|
||||
rpass.set_index_buffer(model.indices.buf.slice(..),model.indices.format);
|
||||
//TODO: loop over triangle strips
|
||||
rpass.draw_indexed(0..model.indices.count,0,0..model.instance_count);
|
||||
}
|
||||
|
||||
rpass.set_pipeline(&self.pipelines.skybox);
|
||||
rpass.draw(0..3,0..1);
|
||||
}
|
||||
|
||||
queue.submit(std::iter::once(encoder.finish()));
|
||||
|
||||
self.staging_belt.recall();
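//StagingBelt lifecycle note: the write_buffer() views are filled before finish() is called, finish() makes the copies recorded into `encoder` valid to submit, and recall() here (after the submit) reclaims the staging memory for the next frame.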
|
||||
}
|
||||
}
|
||||
const MODEL_BUFFER_SIZE:usize=4*4 + 12 + 4;//let size=std::mem::size_of::<ModelInstance>();
|
||||
const MODEL_BUFFER_SIZE_BYTES:usize=MODEL_BUFFER_SIZE*4;
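//Per-instance uniform layout (as packed by get_instances_buffer_data below): a 4x4 model transform (16 floats), the 3x3 normal transform padded to three vec4 rows (12 floats), and an RGBA color (4 floats), i.e. 32 floats = 128 bytes per instance.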
|
||||
fn get_instances_buffer_data(instances:&[GraphicsModelOwned])->Vec<f32>{
|
||||
let mut raw=Vec::with_capacity(MODEL_BUFFER_SIZE*instances.len());
|
||||
for mi in instances{
|
||||
//model transform
|
||||
raw.extend_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&mi.transform)[..]);
|
||||
//normal transform
|
||||
raw.extend_from_slice(AsRef::<[f32; 3]>::as_ref(&mi.normal_transform.x_axis));
|
||||
raw.extend_from_slice(&[0.0]);
|
||||
raw.extend_from_slice(AsRef::<[f32; 3]>::as_ref(&mi.normal_transform.y_axis));
|
||||
raw.extend_from_slice(&[0.0]);
|
||||
raw.extend_from_slice(AsRef::<[f32; 3]>::as_ref(&mi.normal_transform.z_axis));
|
||||
raw.extend_from_slice(&[0.0]);
|
||||
//color
|
||||
raw.extend_from_slice(AsRef::<[f32; 4]>::as_ref(&mi.color.get()));
|
||||
}
|
||||
raw
|
||||
}
|
@ -1,65 +0,0 @@
pub enum Instruction{
	Render(crate::graphics::FrameState),
	//UpdateModel(crate::graphics::GraphicsModelUpdate),
	Resize(winit::dpi::PhysicalSize<u32>,crate::settings::UserSettings),
	ChangeMap(strafesnet_common::map::CompleteMap),
}

//Ideally the graphics thread worker description is:
/*
WorkerDescription{
	input:Immediate,
	output:Realtime(PoolOrdering::Ordered(3)),
}
*/
//up to three frames in flight, dropping new frame requests when all three are busy, and dropping output frames when one renders out of order

pub fn new<'a>(
	mut graphics:crate::graphics::GraphicsState,
	mut config:wgpu::SurfaceConfiguration,
	surface:wgpu::Surface<'a>,
	device:wgpu::Device,
	queue:wgpu::Queue,
)->crate::compat_worker::INWorker<'a,Instruction>{
	let mut resize=None;
	crate::compat_worker::INWorker::new(move |ins:Instruction|{
		match ins{
			Instruction::ChangeMap(map)=>{
				graphics.clear();
				graphics.generate_models(&device,&queue,&map);
			},
			Instruction::Resize(size,user_settings)=>{
				resize=Some((size,user_settings));
			}
			Instruction::Render(frame_state)=>{
				if let Some((size,user_settings))=resize.take(){
					println!("Resizing to {:?}",size);
					let t0=std::time::Instant::now();
					config.width=size.width.max(1);
					config.height=size.height.max(1);
					surface.configure(&device,&config);
					graphics.resize(&device,&config,&user_settings);
					println!("Resize took {:?}",t0.elapsed());
				}
				//this has to go deeper somehow
				let frame=match surface.get_current_texture(){
					Ok(frame)=>frame,
					Err(_)=>{
						surface.configure(&device,&config);
						surface
							.get_current_texture()
							.expect("Failed to acquire next surface texture!")
					}
				};
				let view=frame.texture.create_view(&wgpu::TextureViewDescriptor{
					format:Some(config.view_formats[0]),
					..wgpu::TextureViewDescriptor::default()
				});

				graphics.render(&view,&device,&queue,frame_state);

				frame.present();
			}
		}
	})
}
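//A hypothetical sketch (not part of this codebase) of the frames-in-flight policy described in the comment above new(): count frames in flight, refuse a fourth request, and drop any output that finishes out of order. All names below are illustrative.
/*
struct FramePool{
	in_flight:usize,
	next_seq:u64,
	last_presented:u64,
}
impl FramePool{
	fn try_begin(&mut self)->Option<u64>{
		//drop new frame requests when all three slots are busy
		if self.in_flight<3{
			self.in_flight+=1;
			self.next_seq+=1;
			Some(self.next_seq)
		}else{
			None
		}
	}
	fn finish(&mut self,seq:u64)->bool{
		//drop output frames that complete out of order
		self.in_flight-=1;
		if self.last_presented<seq{
			self.last_presented=seq;
			true
		}else{
			false
		}
	}
}
*/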
1 src/lib.rs Normal file
@ -0,0 +1 @@
|
||||
pub mod framework;
|
883 src/main.rs
@ -1,17 +1,872 @@
|
||||
mod file;
|
||||
mod setup;
|
||||
mod window;
|
||||
mod worker;
|
||||
mod physics;
|
||||
mod graphics;
|
||||
mod settings;
|
||||
mod face_crawler;
|
||||
mod compat_worker;
|
||||
mod model_physics;
|
||||
mod model_graphics;
|
||||
mod physics_worker;
|
||||
mod graphics_worker;
|
||||
use bytemuck::{Pod, Zeroable};
|
||||
use std::{borrow::Cow, time::Instant};
|
||||
use wgpu::{util::DeviceExt, AstcBlock, AstcChannel};
|
||||
|
||||
const IMAGE_SIZE: u32 = 128;
|
||||
|
||||
#[derive(Clone, Copy, Pod, Zeroable)]
|
||||
#[repr(C)]
|
||||
struct Vertex {
|
||||
pos: [f32; 3],
|
||||
texture: [f32; 2],
|
||||
normal: [f32; 3],
|
||||
}
|
||||
|
||||
struct Entity {
|
||||
index_count: u32,
|
||||
index_buf: wgpu::Buffer,
|
||||
}
|
||||
|
||||
//temp?
|
||||
struct ModelData {
|
||||
transform: glam::Mat4,
|
||||
transform_depth: glam::Mat4,
|
||||
use_depth: glam::Vec4,
|
||||
vertex_buf: wgpu::Buffer,
|
||||
entities: Vec<Entity>,
|
||||
}
|
||||
|
||||
struct Model {
|
||||
transform: glam::Mat4,
|
||||
transform_depth: glam::Mat4,
|
||||
use_depth: glam::Vec4,
|
||||
vertex_buf: wgpu::Buffer,
|
||||
entities: Vec<Entity>,
|
||||
bind_group: wgpu::BindGroup,
|
||||
model_buf: wgpu::Buffer,
|
||||
}
|
||||
|
||||
// Note: we use the Y=up coordinate space in this example.
|
||||
struct Camera {
|
||||
time: Instant,
|
||||
pos: glam::Vec3,
|
||||
vel: glam::Vec3,
|
||||
gravity: glam::Vec3,
|
||||
friction: f32,
|
||||
screen_size: (u32, u32),
|
||||
offset: glam::Vec3,
|
||||
fov: f32,
|
||||
yaw: f32,
|
||||
pitch: f32,
|
||||
controls: u32,
|
||||
mv: f32,
|
||||
grounded: bool,
|
||||
walkspeed: f32,
|
||||
}
|
||||
|
||||
const CONTROL_MOVEFORWARD:u32 = 0b00000001;
|
||||
const CONTROL_MOVEBACK:u32 = 0b00000010;
|
||||
const CONTROL_MOVERIGHT:u32 = 0b00000100;
|
||||
const CONTROL_MOVELEFT:u32 = 0b00001000;
|
||||
const CONTROL_MOVEUP:u32 = 0b00010000;
|
||||
const CONTROL_MOVEDOWN:u32 = 0b00100000;
|
||||
const CONTROL_JUMP:u32 = 0b01000000;
|
||||
const CONTROL_ZOOM:u32 = 0b10000000;
|
||||
|
||||
const FORWARD_DIR:glam::Vec3 = glam::Vec3::new(0.0,0.0,-1.0);
|
||||
const RIGHT_DIR:glam::Vec3 = glam::Vec3::new(1.0,0.0,0.0);
|
||||
const UP_DIR:glam::Vec3 = glam::Vec3::new(0.0,1.0,0.0);
|
||||
|
||||
fn get_control_dir(controls: u32) -> glam::Vec3{
|
||||
//don't get fancy just do it
|
||||
let mut control_dir:glam::Vec3 = glam::Vec3::new(0.0,0.0,0.0);
|
||||
if controls & CONTROL_MOVEFORWARD == CONTROL_MOVEFORWARD {
|
||||
control_dir+=FORWARD_DIR;
|
||||
}
|
||||
if controls & CONTROL_MOVEBACK == CONTROL_MOVEBACK {
|
||||
control_dir+=-FORWARD_DIR;
|
||||
}
|
||||
if controls & CONTROL_MOVELEFT == CONTROL_MOVELEFT {
|
||||
control_dir+=-RIGHT_DIR;
|
||||
}
|
||||
if controls & CONTROL_MOVERIGHT == CONTROL_MOVERIGHT {
|
||||
control_dir+=RIGHT_DIR;
|
||||
}
|
||||
if controls & CONTROL_MOVEUP == CONTROL_MOVEUP {
|
||||
control_dir+=UP_DIR;
|
||||
}
|
||||
if controls & CONTROL_MOVEDOWN == CONTROL_MOVEDOWN {
|
||||
control_dir+=-UP_DIR;
|
||||
}
|
||||
return control_dir
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn perspective_rh(fov_y_slope: f32, aspect_ratio: f32, z_near: f32, z_far: f32) -> glam::Mat4 {
|
||||
//glam_assert!(z_near > 0.0 && z_far > 0.0);
|
||||
let r = z_far / (z_near - z_far);
|
||||
glam::Mat4::from_cols(
|
||||
glam::Vec4::new(1.0/(fov_y_slope * aspect_ratio), 0.0, 0.0, 0.0),
|
||||
glam::Vec4::new(0.0, 1.0/fov_y_slope, 0.0, 0.0),
|
||||
glam::Vec4::new(0.0, 0.0, r, -1.0),
|
||||
glam::Vec4::new(0.0, 0.0, r * z_near, 0.0),
|
||||
)
|
||||
}
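//Depth-range check: with r = z_far/(z_near - z_far), a view-space point at z = -z_near yields clip.z/clip.w = 0 and z = -z_far yields clip.z/clip.w = 1, i.e. the conventional 0..=1 depth range expected by the wgpu depth attachment.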
|
||||
|
||||
impl Camera {
|
||||
fn to_uniform_data(&self) -> [f32; 16 * 3 + 4] {
|
||||
let aspect = self.screen_size.0 as f32 / self.screen_size.1 as f32;
|
||||
let fov = if self.controls&CONTROL_ZOOM==0 {
|
||||
self.fov
|
||||
}else{
|
||||
self.fov/5.0
|
||||
};
|
||||
let proj = perspective_rh(fov, aspect, 0.5, 1000.0);
|
||||
let proj_inv = proj.inverse();
|
||||
let view = glam::Mat4::from_translation(self.pos+self.offset) * glam::Mat4::from_euler(glam::EulerRot::YXZ, self.yaw, self.pitch, 0f32);
|
||||
let view_inv = view.inverse();
|
||||
|
||||
let mut raw = [0f32; 16 * 3 + 4];
|
||||
raw[..16].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj)[..]);
|
||||
raw[16..32].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj_inv)[..]);
|
||||
raw[32..48].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view_inv)[..]);
|
||||
raw[48..52].copy_from_slice(AsRef::<[f32; 4]>::as_ref(&view.col(3)));
|
||||
raw
|
||||
}
|
||||
}
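//Uniform layout produced above: proj (16 floats), proj_inv (16), view_inv (16), then the translation column of `view` (4), 52 floats in total; shader.wgsl is assumed to declare the matching struct in this order.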
|
||||
|
||||
pub struct Skybox {
|
||||
camera: Camera,
|
||||
sky_pipeline: wgpu::RenderPipeline,
|
||||
entity_pipeline: wgpu::RenderPipeline,
|
||||
ground_pipeline: wgpu::RenderPipeline,
|
||||
special_teapot: Model,
|
||||
checkered_pipeline: wgpu::RenderPipeline,
|
||||
depth_overwrite_pipeline: wgpu::RenderPipeline,
|
||||
main_bind_group: wgpu::BindGroup,
|
||||
camera_buf: wgpu::Buffer,
|
||||
models: Vec<Model>,
|
||||
depth_view: wgpu::TextureView,
|
||||
staging_belt: wgpu::util::StagingBelt,
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
impl Skybox {
|
||||
const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24Plus;
|
||||
|
||||
fn create_depth_texture(
|
||||
config: &wgpu::SurfaceConfiguration,
|
||||
device: &wgpu::Device,
|
||||
) -> wgpu::TextureView {
|
||||
let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
size: wgpu::Extent3d {
|
||||
width: config.width,
|
||||
height: config.height,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: Self::DEPTH_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
|
||||
label: None,
|
||||
view_formats: &[],
|
||||
});
|
||||
|
||||
depth_texture.create_view(&wgpu::TextureViewDescriptor::default())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_model_uniform_data(model:&Model) -> [f32; 4*4*2+4] {
|
||||
let mut raw = [0f32; 4*4*2+4];
|
||||
raw[0..16].copy_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&model.transform)[..]);
|
||||
raw[16..32].copy_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&model.transform_depth)[..]);
|
||||
raw[32..36].copy_from_slice(AsRef::<[f32; 4]>::as_ref(&model.use_depth));
|
||||
raw
|
||||
}
|
||||
fn get_modeldata_uniform_data(model:&ModelData) -> [f32; 4*4*2+4] {
|
||||
let mut raw = [0f32; 4*4*2+4];
|
||||
raw[0..16].copy_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&model.transform)[..]);
|
||||
raw[16..32].copy_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&model.transform_depth)[..]);
|
||||
raw[32..36].copy_from_slice(AsRef::<[f32; 4]>::as_ref(&model.use_depth));
|
||||
raw
|
||||
}
|
||||
|
||||
fn add_obj(device:&wgpu::Device,modeldatas:& mut Vec<ModelData>,source:&[u8]){
|
||||
let data = obj::ObjData::load_buf(&source[..]).unwrap();
|
||||
let mut vertices = Vec::new();
|
||||
let mut vertex_index = std::collections::HashMap::<obj::IndexTuple,u16>::new();
|
||||
for object in data.objects {
|
||||
let mut entities = Vec::<Entity>::new();
|
||||
for group in object.groups {
|
||||
let mut indices = Vec::new();
|
||||
for poly in group.polys {
|
||||
for end_index in 2..poly.0.len() {
|
||||
for &index in &[0, end_index - 1, end_index] {
|
||||
let vert = poly.0[index];
|
||||
if let Some(&i)=vertex_index.get(&vert){
|
||||
indices.push(i as u16);
|
||||
}else{
|
||||
let i=vertices.len() as u16;
|
||||
vertices.push(Vertex {
|
||||
pos: data.position[vert.0],
|
||||
texture: data.texture[vert.1.unwrap()],
|
||||
normal: data.normal[vert.2.unwrap()],
|
||||
});
|
||||
vertex_index.insert(vert,i);
|
||||
indices.push(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Index"),
|
||||
contents: bytemuck::cast_slice(&indices),
|
||||
usage: wgpu::BufferUsages::INDEX,
|
||||
});
|
||||
entities.push(Entity {
|
||||
index_buf,
|
||||
index_count: indices.len() as u32,
|
||||
});
|
||||
}
|
||||
let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Vertex"),
|
||||
contents: bytemuck::cast_slice(&vertices),
|
||||
usage: wgpu::BufferUsages::VERTEX,
|
||||
});
|
||||
modeldatas.push(ModelData {
|
||||
transform: glam::Mat4::default(),
|
||||
transform_depth: glam::Mat4::default(),
|
||||
use_depth: glam::Vec4::ZERO,
|
||||
vertex_buf,
|
||||
entities,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl strafe_client::framework::Example for Skybox {
|
||||
fn optional_features() -> wgpu::Features {
|
||||
wgpu::Features::TEXTURE_COMPRESSION_ASTC
|
||||
| wgpu::Features::TEXTURE_COMPRESSION_ETC2
|
||||
| wgpu::Features::TEXTURE_COMPRESSION_BC
|
||||
}
|
||||
|
||||
fn init(
|
||||
config: &wgpu::SurfaceConfiguration,
|
||||
_adapter: &wgpu::Adapter,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
) -> Self {
|
||||
let mut modeldatas = Vec::<ModelData>::new();
|
||||
add_obj(device,& mut modeldatas,include_bytes!("../models/teslacyberv3.0.obj"));
|
||||
add_obj(device,& mut modeldatas,include_bytes!("../models/suzanne.obj"));
|
||||
add_obj(device,& mut modeldatas,include_bytes!("../models/teapot.obj"));
|
||||
println!("models.len = {:?}", modeldatas.len());
|
||||
modeldatas[1].transform=glam::Mat4::from_translation(glam::vec3(10.,5.,10.));
|
||||
|
||||
let proj1 = glam::Mat4::orthographic_rh(-20.0, 20.0, -20.0, 20.0, -20.0, 20.0);
|
||||
let model0 = glam::Mat4::from_translation(glam::vec3(-10.,5.,10.)) * glam::Mat4::from_scale(glam::vec3(10.0,10.0,10.0));
|
||||
|
||||
modeldatas[2].transform=model0;
|
||||
modeldatas[2].transform_depth=proj1;// * view1_inv
|
||||
modeldatas[2].use_depth=glam::Vec4::Z;
|
||||
|
||||
let main_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: None,
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::Cube,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
let model_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: None,
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
// Create the render pipeline
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: None,
|
||||
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
|
||||
});
|
||||
|
||||
let camera = Camera {
|
||||
time: Instant::now(),
|
||||
pos: glam::Vec3::new(5.0,0.0,5.0),
|
||||
vel: glam::Vec3::new(0.0,0.0,0.0),
|
||||
gravity: glam::Vec3::new(0.0,-100.0,0.0),
|
||||
friction: 90.0,
|
||||
screen_size: (config.width, config.height),
|
||||
offset: glam::Vec3::new(0.0,4.5,0.0),
|
||||
fov: 1.0, //fov_slope = tan(fov_y/2)
|
||||
pitch: 0.0,
|
||||
yaw: 0.0,
|
||||
mv: 2.7,
|
||||
controls:0,
|
||||
grounded: true,
|
||||
walkspeed: 18.0,
|
||||
};
|
||||
let camera_uniforms = camera.to_uniform_data();
|
||||
let camera_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Camera"),
|
||||
contents: bytemuck::cast_slice(&camera_uniforms),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: None,
|
||||
bind_group_layouts: &[&main_bind_group_layout, &model_bind_group_layout],
|
||||
push_constant_ranges: &[],
|
||||
});
|
||||
|
||||
// Create the render pipelines
|
||||
let sky_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Sky"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: "vs_sky",
|
||||
buffers: &[],
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: "fs_sky",
|
||||
targets: &[Some(config.view_formats[0].into())],
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
front_face: wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: Self::DEPTH_FORMAT,
|
||||
depth_write_enabled: false,
|
||||
depth_compare: wgpu::CompareFunction::LessEqual,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
});
|
||||
let entity_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Entity"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: "vs_entity",
|
||||
buffers: &[wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3],
|
||||
}],
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: "fs_entity",
|
||||
targets: &[Some(config.view_formats[0].into())],
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
front_face: wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: Self::DEPTH_FORMAT,
|
||||
depth_write_enabled: true,
|
||||
depth_compare: wgpu::CompareFunction::LessEqual,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
});
|
||||
let ground_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Ground"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: "vs_ground",
|
||||
buffers: &[],
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: "fs_ground",
|
||||
targets: &[Some(config.view_formats[0].into())],
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
front_face: wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: Self::DEPTH_FORMAT,
|
||||
depth_write_enabled: true,
|
||||
depth_compare: wgpu::CompareFunction::LessEqual,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
});
|
||||
let checkered_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Checkered"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: "vs_square",
|
||||
buffers: &[],
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: "fs_checkered",
|
||||
targets: &[Some(config.view_formats[0].into())],
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
front_face: wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: Self::DEPTH_FORMAT,
|
||||
depth_write_enabled: false,
|
||||
depth_compare: wgpu::CompareFunction::Always,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
});
|
||||
let depth_overwrite_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
|
||||
label: Some("Overwrite"),
|
||||
layout: Some(&pipeline_layout),
|
||||
vertex: wgpu::VertexState {
|
||||
module: &shader,
|
||||
entry_point: "vs_square",
|
||||
buffers: &[],
|
||||
},
|
||||
fragment: Some(wgpu::FragmentState {
|
||||
module: &shader,
|
||||
entry_point: "fs_overwrite",
|
||||
targets: &[Some(config.view_formats[0].into())],
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
front_face: wgpu::FrontFace::Cw,
|
||||
..Default::default()
|
||||
},
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: Self::DEPTH_FORMAT,
|
||||
depth_write_enabled: true,
|
||||
depth_compare: wgpu::CompareFunction::Always,
|
||||
stencil: wgpu::StencilState::default(),
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
});
|
||||
|
||||
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
|
||||
label: None,
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
mipmap_filter: wgpu::FilterMode::Linear,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let device_features = device.features();
|
||||
|
||||
let skybox_format = if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC) {
|
||||
log::info!("Using ASTC");
|
||||
wgpu::TextureFormat::Astc {
|
||||
block: AstcBlock::B4x4,
|
||||
channel: AstcChannel::UnormSrgb,
|
||||
}
|
||||
} else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2) {
|
||||
log::info!("Using ETC2");
|
||||
wgpu::TextureFormat::Etc2Rgb8UnormSrgb
|
||||
} else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC) {
|
||||
log::info!("Using BC");
|
||||
wgpu::TextureFormat::Bc1RgbaUnormSrgb
|
||||
} else {
|
||||
log::info!("Using plain");
|
||||
wgpu::TextureFormat::Bgra8UnormSrgb
|
||||
};
|
||||
|
||||
let size = wgpu::Extent3d {
|
||||
width: IMAGE_SIZE,
|
||||
height: IMAGE_SIZE,
|
||||
depth_or_array_layers: 6,
|
||||
};
|
||||
|
||||
let layer_size = wgpu::Extent3d {
|
||||
depth_or_array_layers: 1,
|
||||
..size
|
||||
};
|
||||
let max_mips = layer_size.max_mips(wgpu::TextureDimension::D2);
|
||||
|
||||
log::debug!(
|
||||
"Copying {:?} skybox images of size {}, {}, 6 with {} mips to gpu",
|
||||
skybox_format,
|
||||
IMAGE_SIZE,
|
||||
IMAGE_SIZE,
|
||||
max_mips,
|
||||
);
|
||||
|
||||
let bytes = match skybox_format {
|
||||
wgpu::TextureFormat::Astc {
|
||||
block: AstcBlock::B4x4,
|
||||
channel: AstcChannel::UnormSrgb,
|
||||
} => &include_bytes!("../images/astc.dds")[..],
|
||||
wgpu::TextureFormat::Etc2Rgb8UnormSrgb => &include_bytes!("../images/etc2.dds")[..],
|
||||
wgpu::TextureFormat::Bc1RgbaUnormSrgb => &include_bytes!("../images/bc1.dds")[..],
|
||||
wgpu::TextureFormat::Bgra8UnormSrgb => &include_bytes!("../images/bgra.dds")[..],
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap();
|
||||
|
||||
let texture = device.create_texture_with_data(
|
||||
queue,
|
||||
&wgpu::TextureDescriptor {
|
||||
size,
|
||||
mip_level_count: max_mips,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: skybox_format,
|
||||
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
|
||||
label: None,
|
||||
view_formats: &[],
|
||||
},
|
||||
&image.data,
|
||||
);
|
||||
|
||||
let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
label: None,
|
||||
dimension: Some(wgpu::TextureViewDimension::Cube),
|
||||
..wgpu::TextureViewDescriptor::default()
|
||||
});
|
||||
let main_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &main_bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: camera_buf.as_entire_binding(),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::TextureView(&texture_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Sampler(&sampler),
|
||||
},
|
||||
],
|
||||
label: Some("Camera"),
|
||||
});
|
||||
|
||||
//drain the modeldata vec so entities can be /moved/ to models.entities
|
||||
let mut models = Vec::<Model>::with_capacity(modeldatas.len());
|
||||
for (i,modeldata) in modeldatas.drain(..).enumerate() {
|
||||
let model_uniforms = get_modeldata_uniform_data(&modeldata);
|
||||
let model_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some(format!("Model{}",i).as_str()),
|
||||
contents: bytemuck::cast_slice(&model_uniforms),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
let model_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &model_bind_group_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: model_buf.as_entire_binding(),
|
||||
},
|
||||
],
|
||||
label: Some(format!("Model{}",i).as_str()),
|
||||
});
|
||||
//all of these are being moved here
|
||||
models.push(Model{
|
||||
transform: modeldata.transform,
|
||||
transform_depth: modeldata.transform_depth,
|
||||
use_depth: modeldata.use_depth,
|
||||
vertex_buf:modeldata.vertex_buf,
|
||||
entities: modeldata.entities,
|
||||
bind_group: model_bind_group,
|
||||
model_buf,
|
||||
})
|
||||
}
|
||||
|
||||
let teapot = models.pop().unwrap();
|
||||
|
||||
let depth_view = Self::create_depth_texture(config, device);
|
||||
|
||||
Skybox {
|
||||
camera,
|
||||
sky_pipeline,
|
||||
entity_pipeline,
|
||||
ground_pipeline,
|
||||
special_teapot: teapot,
|
||||
checkered_pipeline,
|
||||
depth_overwrite_pipeline,
|
||||
main_bind_group,
|
||||
camera_buf,
|
||||
models,
|
||||
depth_view,
|
||||
staging_belt: wgpu::util::StagingBelt::new(0x100),
|
||||
start_time: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::single_match)]
|
||||
fn update(&mut self, event: winit::event::WindowEvent) {
|
||||
match event {
|
||||
winit::event::WindowEvent::KeyboardInput {
|
||||
input:
|
||||
winit::event::KeyboardInput {
|
||||
state,
|
||||
virtual_keycode: Some(keycode),
|
||||
..
|
||||
},
|
||||
..
|
||||
} => {
|
||||
match (state,keycode) {
|
||||
(k,winit::event::VirtualKeyCode::W) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEFORWARD,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEFORWARD,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::A) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVELEFT,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVELEFT,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::S) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEBACK,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEBACK,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::D) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVERIGHT,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVERIGHT,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::E) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEUP,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEUP,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::Q) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_MOVEDOWN,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_MOVEDOWN,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::Space) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_JUMP,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_JUMP,
|
||||
}
|
||||
(k,winit::event::VirtualKeyCode::Z) => match k {
|
||||
winit::event::ElementState::Pressed => self.camera.controls|=CONTROL_ZOOM,
|
||||
winit::event::ElementState::Released => self.camera.controls&=!CONTROL_ZOOM,
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn move_mouse(&mut self, delta: (f64,f64)) {
|
||||
self.camera.pitch=(self.camera.pitch as f64+delta.1/-2048.) as f32;
|
||||
self.camera.yaw=(self.camera.yaw as f64+delta.0/-2048.) as f32;
|
||||
}
|
||||
|
||||
fn resize(
|
||||
&mut self,
|
||||
config: &wgpu::SurfaceConfiguration,
|
||||
device: &wgpu::Device,
|
||||
_queue: &wgpu::Queue,
|
||||
) {
|
||||
self.depth_view = Self::create_depth_texture(config, device);
|
||||
self.camera.screen_size = (config.width, config.height);
|
||||
}
|
||||
|
||||
fn render(
|
||||
&mut self,
|
||||
view: &wgpu::TextureView,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
_spawner: &strafe_client::framework::Spawner,
|
||||
) {
|
||||
let time = Instant::now();
|
||||
|
||||
//physics
|
||||
let dt=(time-self.camera.time).as_secs_f32();
|
||||
self.camera.time=time;
|
||||
let camera_mat=glam::Mat3::from_euler(glam::EulerRot::YXZ,self.camera.yaw,0f32,0f32);
|
||||
let control_dir=camera_mat*get_control_dir(self.camera.controls&(CONTROL_MOVELEFT|CONTROL_MOVERIGHT|CONTROL_MOVEFORWARD|CONTROL_MOVEBACK)).normalize_or_zero();
|
||||
let d=self.camera.vel.dot(control_dir);
|
||||
if d<self.camera.mv {
|
||||
self.camera.vel+=(self.camera.mv-d)*control_dir;
|
||||
}
|
||||
self.camera.vel+=self.camera.gravity*dt;
|
||||
self.camera.pos+=self.camera.vel*dt;
|
||||
if self.camera.pos.y<0.0{
|
||||
self.camera.pos.y=0.0;
|
||||
self.camera.vel.y=0.0;
|
||||
self.camera.grounded=true;
|
||||
}
|
||||
if self.camera.grounded&&(self.camera.controls&CONTROL_JUMP)!=0 {
|
||||
self.camera.grounded=false;
|
||||
self.camera.vel+=glam::Vec3::new(0.0,0.715588/2.0*100.0,0.0);
|
||||
}
|
||||
if self.camera.grounded {
|
||||
let applied_friction=self.camera.friction*dt;
|
||||
let targetv=control_dir*self.camera.walkspeed;
|
||||
let diffv=targetv-self.camera.vel;
|
||||
if applied_friction*applied_friction<diffv.length_squared() {
|
||||
self.camera.vel+=applied_friction*diffv.normalize();
|
||||
} else {
|
||||
self.camera.vel=targetv;
|
||||
}
|
||||
}
|
||||
|
||||
let proj1 = glam::Mat4::orthographic_rh(-20.0, 20.0, -20.0, 20.0, -20.0, 20.0);
|
||||
let model1 = glam::Mat4::from_euler(glam::EulerRot::YXZ, self.start_time.elapsed().as_secs_f32(),0f32,0f32);
|
||||
|
||||
self.special_teapot.transform_depth=proj1 * model1;
|
||||
|
||||
let mut encoder =
|
||||
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
|
||||
|
||||
// update rotation
|
||||
let camera_uniforms = self.camera.to_uniform_data();
|
||||
self.staging_belt
|
||||
.write_buffer(
|
||||
&mut encoder,
|
||||
&self.camera_buf,
|
||||
0,
|
||||
wgpu::BufferSize::new((camera_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
|
||||
device,
|
||||
)
|
||||
.copy_from_slice(bytemuck::cast_slice(&camera_uniforms));
|
||||
//special teapot
|
||||
{
|
||||
let model_uniforms = get_model_uniform_data(&self.special_teapot);
|
||||
self.staging_belt
|
||||
.write_buffer(
|
||||
&mut encoder,
|
||||
&self.special_teapot.model_buf,//description of where data will be written when command is executed
|
||||
0,//offset in staging belt?
|
||||
wgpu::BufferSize::new((model_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
|
||||
device,
|
||||
)
|
||||
.copy_from_slice(bytemuck::cast_slice(&model_uniforms));
|
||||
}
|
||||
//This code only needs to run when the uniforms change
|
||||
for model in self.models.iter() {
|
||||
let model_uniforms = get_model_uniform_data(&model);
|
||||
self.staging_belt
|
||||
.write_buffer(
|
||||
&mut encoder,
|
||||
&model.model_buf,//description of where data will be written when command is executed
|
||||
0,//offset in staging belt?
|
||||
wgpu::BufferSize::new((model_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
|
||||
device,
|
||||
)
|
||||
.copy_from_slice(bytemuck::cast_slice(&model_uniforms));
|
||||
}
|
||||
self.staging_belt.finish();
|
||||
|
||||
{
|
||||
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: None,
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view,
|
||||
resolve_target: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color {
|
||||
r: 0.1,
|
||||
g: 0.2,
|
||||
b: 0.3,
|
||||
a: 1.0,
|
||||
}),
|
||||
store: true,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: &self.depth_view,
|
||||
depth_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(1.0),
|
||||
store: false,
|
||||
}),
|
||||
stencil_ops: None,
|
||||
}),
|
||||
});
|
||||
|
||||
rpass.set_bind_group(0, &self.main_bind_group, &[]);
|
||||
|
||||
//draw special teapot
|
||||
rpass.set_bind_group(1, &self.special_teapot.bind_group, &[]);
|
||||
rpass.set_pipeline(&self.checkered_pipeline);
|
||||
rpass.draw(0..6, 0..1);
|
||||
|
||||
rpass.set_pipeline(&self.entity_pipeline);
|
||||
rpass.set_vertex_buffer(0, self.special_teapot.vertex_buf.slice(..));
|
||||
|
||||
for entity in self.special_teapot.entities.iter() {
|
||||
rpass.set_index_buffer(entity.index_buf.slice(..), wgpu::IndexFormat::Uint16);
|
||||
rpass.draw_indexed(0..entity.index_count, 0, 0..1);
|
||||
}
|
||||
|
||||
rpass.set_pipeline(&self.depth_overwrite_pipeline);
|
||||
rpass.draw(0..6, 0..1);
|
||||
|
||||
//draw models
|
||||
rpass.set_pipeline(&self.entity_pipeline);
|
||||
for model in self.models.iter() {
|
||||
rpass.set_bind_group(1, &model.bind_group, &[]);
|
||||
rpass.set_vertex_buffer(0, model.vertex_buf.slice(..));
|
||||
|
||||
for entity in model.entities.iter() {
|
||||
rpass.set_index_buffer(entity.index_buf.slice(..), wgpu::IndexFormat::Uint16);
|
||||
rpass.draw_indexed(0..entity.index_count, 0, 0..1);
|
||||
}
|
||||
}
|
||||
|
||||
rpass.set_pipeline(&self.ground_pipeline);
|
||||
//rpass.set_index_buffer(&[0u16,1,2,1,2,3][..] as wgpu::BufferSlice, wgpu::IndexFormat::Uint16);
|
||||
//rpass.draw_indexed(0..4, 0, 0..1);
|
||||
rpass.draw(0..6, 0..1);
|
||||
|
||||
rpass.set_pipeline(&self.sky_pipeline);
|
||||
rpass.draw(0..3, 0..1);
|
||||
}
|
||||
|
||||
queue.submit(std::iter::once(encoder.finish()));
|
||||
|
||||
self.staging_belt.recall();
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
setup::setup_and_start(format!("Strafe Client v{}",env!("CARGO_PKG_VERSION")));
|
||||
strafe_client::framework::run::<Skybox>(
|
||||
format!("Strafe Client v{}",
|
||||
env!("CARGO_PKG_VERSION")
|
||||
).as_str()
|
||||
);
|
||||
}
|
||||
|
@ -1,48 +0,0 @@
use bytemuck::{Pod,Zeroable};
use strafesnet_common::model::{IndexedVertex,PolygonGroup,RenderConfigId};
#[derive(Clone,Copy,Pod,Zeroable)]
#[repr(C)]
pub struct GraphicsVertex{
	pub pos:[f32;3],
	pub tex:[f32;2],
	pub normal:[f32;3],
	pub color:[f32;4],
}
#[derive(Clone,Copy,id::Id)]
pub struct IndexedGraphicsMeshOwnedRenderConfigId(u32);
pub struct IndexedGraphicsMeshOwnedRenderConfig{
	pub unique_pos:Vec<[f32;3]>,
	pub unique_tex:Vec<[f32;2]>,
	pub unique_normal:Vec<[f32;3]>,
	pub unique_color:Vec<[f32;4]>,
	pub unique_vertices:Vec<IndexedVertex>,
	pub render_config:RenderConfigId,
	pub polys:PolygonGroup,
	pub instances:Vec<GraphicsModelOwned>,
}
pub enum Indices{
	U32(Vec<u32>),
	U16(Vec<u16>),
}
pub struct GraphicsMeshOwnedRenderConfig{
	pub vertices:Vec<GraphicsVertex>,
	pub indices:Indices,
	pub render_config:RenderConfigId,
	pub instances:Vec<GraphicsModelOwned>,
}
#[derive(Clone,Copy,PartialEq,id::Id)]
pub struct GraphicsModelColor4(glam::Vec4);
impl std::hash::Hash for GraphicsModelColor4{
	fn hash<H:std::hash::Hasher>(&self,state:&mut H) {
		for &f in self.0.as_ref(){
			bytemuck::cast::<f32,u32>(f).hash(state);
		}
	}
}
impl Eq for GraphicsModelColor4{}
#[derive(Clone)]
pub struct GraphicsModelOwned{
	pub transform:glam::Mat4,
	pub normal_transform:glam::Mat3,
	pub color:GraphicsModelColor4,
}
1001 src/model_physics.rs
File diff suppressed because it is too large
2184 src/physics.rs
File diff suppressed because it is too large
@ -1,242 +0,0 @@
|
||||
use strafesnet_common::mouse::MouseState;
|
||||
use strafesnet_common::physics::Instruction as PhysicsInputInstruction;
|
||||
use strafesnet_common::integer::Time;
|
||||
use strafesnet_common::instruction::TimedInstruction;
|
||||
use strafesnet_common::timer::{Scaled,Timer,TimerState};
|
||||
use mouse_interpolator::MouseInterpolator;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum InputInstruction{
|
||||
MoveMouse(glam::IVec2),
|
||||
MoveRight(bool),
|
||||
MoveUp(bool),
|
||||
MoveBack(bool),
|
||||
MoveLeft(bool),
|
||||
MoveDown(bool),
|
||||
MoveForward(bool),
|
||||
Jump(bool),
|
||||
Zoom(bool),
|
||||
ResetAndRestart,
|
||||
ResetAndSpawn(strafesnet_common::gameplay_modes::ModeId,strafesnet_common::gameplay_modes::StageId),
|
||||
PracticeFly,
|
||||
}
|
||||
pub enum Instruction{
|
||||
Input(InputInstruction),
|
||||
Render,
|
||||
Resize(winit::dpi::PhysicalSize<u32>),
|
||||
ChangeMap(strafesnet_common::map::CompleteMap),
|
||||
//SetPaused is not an InputInstruction: the physics doesn't know that it's paused.
|
||||
SetPaused(bool),
|
||||
//Graphics(crate::graphics_worker::Instruction),
|
||||
}
|
||||
mod mouse_interpolator{
|
||||
use super::*;
|
||||
//TODO: move this or tab
|
||||
pub struct MouseInterpolator{
|
||||
//"PlayerController"
|
||||
user_settings:crate::settings::UserSettings,
|
||||
//"MouseInterpolator"
|
||||
timeline:std::collections::VecDeque<TimedInstruction<PhysicsInputInstruction>>,
|
||||
last_mouse_time:Time,//this value is pre-transformed to simulation time
|
||||
mouse_blocking:bool,
|
||||
//"Simulation"
|
||||
timer:Timer<Scaled>,
|
||||
physics:crate::physics::PhysicsContext,
|
||||
|
||||
}
|
||||
impl MouseInterpolator{
|
||||
pub fn new(
|
||||
physics:crate::physics::PhysicsContext,
|
||||
user_settings:crate::settings::UserSettings,
|
||||
)->MouseInterpolator{
|
||||
MouseInterpolator{
|
||||
mouse_blocking:true,
|
||||
last_mouse_time:physics.get_next_mouse().time,
|
||||
timeline:std::collections::VecDeque::new(),
|
||||
timer:Timer::from_state(Scaled::identity(),false),
|
||||
physics,
|
||||
user_settings,
|
||||
}
|
||||
}
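//Overview of the blocking scheme below: while the mouse is moving, mouse_blocking stays true and the physics deliberately lives in the past, each new MoveMouse becoming the interpolation target for the previous state; once update_mouse_blocking sees no mouse event for 10ms it pushes one final stationary target and unblocks, so queued instructions flush in real time again.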
|
||||
fn push_mouse_instruction(&mut self,ins:&TimedInstruction<Instruction>,m:glam::IVec2){
|
||||
if self.mouse_blocking{
|
||||
//tell the game state which is living in the past about its future
|
||||
self.timeline.push_front(TimedInstruction{
|
||||
time:self.last_mouse_time,
|
||||
instruction:PhysicsInputInstruction::SetNextMouse(MouseState{time:self.timer.time(ins.time),pos:m}),
|
||||
});
|
||||
}else{
|
||||
//mouse has just started moving again after being still for longer than 10ms.
|
||||
//replace the entire mouse interpolation state to avoid an intermediate state with identical m0.t m1.t timestamps which will divide by zero
|
||||
self.timeline.push_front(TimedInstruction{
|
||||
time:self.last_mouse_time,
|
||||
instruction:PhysicsInputInstruction::ReplaceMouse(
|
||||
MouseState{time:self.last_mouse_time,pos:self.physics.get_next_mouse().pos},
|
||||
MouseState{time:self.timer.time(ins.time),pos:m}
|
||||
),
|
||||
});
|
||||
//delay physics execution until we have an interpolation target
|
||||
self.mouse_blocking=true;
|
||||
}
|
||||
self.last_mouse_time=self.timer.time(ins.time);
|
||||
}
|
||||
fn push(&mut self,time:Time,phys_input:PhysicsInputInstruction){
|
||||
//This is always a non-mouse event
|
||||
self.timeline.push_back(TimedInstruction{
|
||||
time:self.timer.time(time),
|
||||
instruction:phys_input,
|
||||
});
|
||||
}
|
||||
/// returns should_empty_queue
|
||||
/// may or may not mutate internal state XD!
|
||||
fn map_instruction(&mut self,ins:&TimedInstruction<Instruction>)->bool{
|
||||
let mut update_mouse_blocking=true;
|
||||
match &ins.instruction{
|
||||
Instruction::Input(input_instruction)=>match input_instruction{
|
||||
&InputInstruction::MoveMouse(m)=>{
|
||||
if !self.timer.is_paused(){
|
||||
self.push_mouse_instruction(ins,m);
|
||||
}
|
||||
update_mouse_blocking=false;
|
||||
},
|
||||
&InputInstruction::MoveForward(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveForward(s)),
|
||||
&InputInstruction::MoveLeft(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveLeft(s)),
|
||||
&InputInstruction::MoveBack(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveBack(s)),
|
||||
&InputInstruction::MoveRight(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveRight(s)),
|
||||
&InputInstruction::MoveUp(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveUp(s)),
|
||||
&InputInstruction::MoveDown(s)=>self.push(ins.time,PhysicsInputInstruction::SetMoveDown(s)),
|
||||
&InputInstruction::Jump(s)=>self.push(ins.time,PhysicsInputInstruction::SetJump(s)),
|
||||
&InputInstruction::Zoom(s)=>self.push(ins.time,PhysicsInputInstruction::SetZoom(s)),
|
||||
&InputInstruction::ResetAndSpawn(mode_id,stage_id)=>{
|
||||
self.push(ins.time,PhysicsInputInstruction::Reset);
|
||||
self.push(ins.time,PhysicsInputInstruction::SetSensitivity(self.user_settings.calculate_sensitivity()));
|
||||
self.push(ins.time,PhysicsInputInstruction::Spawn(mode_id,stage_id));
|
||||
},
|
||||
InputInstruction::ResetAndRestart=>{
|
||||
self.push(ins.time,PhysicsInputInstruction::Reset);
|
||||
self.push(ins.time,PhysicsInputInstruction::SetSensitivity(self.user_settings.calculate_sensitivity()));
|
||||
self.push(ins.time,PhysicsInputInstruction::Restart);
|
||||
},
|
||||
InputInstruction::PracticeFly=>self.push(ins.time,PhysicsInputInstruction::PracticeFly),
|
||||
},
|
||||
//do these really need to idle the physics?
|
||||
//sending None dumps the instruction queue
|
||||
Instruction::ChangeMap(_)=>self.push(ins.time,PhysicsInputInstruction::Idle),
|
||||
Instruction::Resize(_)=>self.push(ins.time,PhysicsInputInstruction::Idle),
|
||||
Instruction::Render=>self.push(ins.time,PhysicsInputInstruction::Idle),
|
||||
&Instruction::SetPaused(paused)=>{
|
||||
if let Err(e)=self.timer.set_paused(ins.time,paused){
|
||||
println!("Cannot pause: {e}");
|
||||
}
|
||||
self.push(ins.time,PhysicsInputInstruction::Idle);
|
||||
},
|
||||
}
|
||||
if update_mouse_blocking{
|
||||
//this returns the bool for us
|
||||
self.update_mouse_blocking(ins.time)
|
||||
}else{
|
||||
//do flush that queue
|
||||
true
|
||||
}
|
||||
}
|
||||
/// must check if self.mouse_blocking==true before calling!
|
||||
fn unblock_mouse(&mut self,time:Time){
|
||||
//push an event to extrapolate no movement from
|
||||
self.timeline.push_front(TimedInstruction{
|
||||
time:self.last_mouse_time,
|
||||
instruction:PhysicsInputInstruction::SetNextMouse(MouseState{time:self.timer.time(time),pos:self.physics.get_next_mouse().pos}),
|
||||
});
|
||||
self.last_mouse_time=self.timer.time(time);
|
||||
//stop blocking. the mouse is not moving so the physics does not need to live in the past and wait for interpolation targets.
|
||||
self.mouse_blocking=false;
|
||||
}
|
||||
fn update_mouse_blocking(&mut self,time:Time)->bool{
|
||||
if self.mouse_blocking{
|
||||
//assume the mouse has stopped moving after 10ms.
|
||||
//shitty mice are 125Hz which is 8ms so this should cover that.
|
||||
//setting this to 100us still doesn't print even though it's 10x lower than the polling rate,
|
||||
//so mouse events are probably not handled separately from drawing and fire right before it :(
|
||||
if Time::from_millis(10)<self.timer.time(time)-self.physics.get_next_mouse().time{
|
||||
self.unblock_mouse(time);
|
||||
true
|
||||
}else{
|
||||
false
|
||||
}
|
||||
}else{
|
||||
//keep this up to date so that it can be used as a known-timestamp
|
||||
//that the mouse was not moving when the mouse starts moving again
|
||||
self.last_mouse_time=self.timer.time(time);
|
||||
true
|
||||
}
|
||||
}
|
||||
fn empty_queue(&mut self){
|
||||
while let Some(instruction)=self.timeline.pop_front(){
|
||||
self.physics.run_input_instruction(instruction);
|
||||
}
|
||||
}
|
||||
pub fn handle_instruction(&mut self,ins:&TimedInstruction<Instruction>){
|
||||
let should_empty_queue=self.map_instruction(ins);
|
||||
if should_empty_queue{
|
||||
self.empty_queue();
|
||||
}
|
||||
}
|
||||
pub fn get_frame_state(&self,time:Time)->crate::graphics::FrameState{
|
||||
crate::graphics::FrameState{
|
||||
body:self.physics.camera_body(),
|
||||
camera:self.physics.camera(),
|
||||
time:self.timer.time(time),
|
||||
}
|
||||
}
|
||||
pub fn change_map(&mut self,time:Time,map:&strafesnet_common::map::CompleteMap){
|
||||
//dump any pending interpolation state
|
||||
if self.mouse_blocking{
|
||||
self.unblock_mouse(time);
|
||||
}
|
||||
self.empty_queue();
|
||||
|
||||
//doing it like this to avoid doing PhysicsInstruction::ChangeMap(Rc<CompleteMap>)
|
||||
self.physics.generate_models(&map);
|
||||
|
||||
//use the standard input interface so the instructions are written out to bots
|
||||
self.handle_instruction(&TimedInstruction{
|
||||
time:self.timer.time(time),
|
||||
instruction:Instruction::Input(InputInstruction::ResetAndSpawn(
|
||||
strafesnet_common::gameplay_modes::ModeId::MAIN,
|
||||
strafesnet_common::gameplay_modes::StageId::FIRST,
|
||||
)),
|
||||
});
|
||||
}
|
||||
pub const fn user_settings(&self)->&crate::settings::UserSettings{
|
||||
&self.user_settings
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<'a>(
|
||||
mut graphics_worker:crate::compat_worker::INWorker<'a,crate::graphics_worker::Instruction>,
|
||||
user_settings:crate::settings::UserSettings,
|
||||
)->crate::compat_worker::QNWorker<'a,TimedInstruction<Instruction>>{
|
||||
let physics=crate::physics::PhysicsContext::default();
|
||||
let mut interpolator=MouseInterpolator::new(
|
||||
physics,
|
||||
user_settings
|
||||
);
|
||||
crate::compat_worker::QNWorker::new(move |ins:TimedInstruction<Instruction>|{
|
||||
interpolator.handle_instruction(&ins);
|
||||
match ins.instruction{
|
||||
Instruction::Render=>{
|
||||
let frame_state=interpolator.get_frame_state(ins.time);
|
||||
graphics_worker.send(crate::graphics_worker::Instruction::Render(frame_state)).unwrap();
|
||||
},
|
||||
Instruction::Resize(size)=>{
|
||||
graphics_worker.send(crate::graphics_worker::Instruction::Resize(size,interpolator.user_settings().clone())).unwrap();
|
||||
},
|
||||
Instruction::ChangeMap(map)=>{
|
||||
interpolator.change_map(ins.time,&map);
|
||||
graphics_worker.send(crate::graphics_worker::Instruction::ChangeMap(map)).unwrap();
|
||||
},
|
||||
Instruction::Input(_)=>(),
|
||||
Instruction::SetPaused(_)=>(),
|
||||
}
|
||||
})
|
||||
}
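//hypothetical wiring sketch (matches how src/window.rs below uses this): the window worker
//builds the physics worker once and then forwards TimedInstructions to it, e.g.
//	let physics_thread=crate::physics_worker::new(graphics_thread,user_settings);
//	physics_thread.send(TimedInstruction{time,instruction:Instruction::Render}).unwrap();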
|
src/settings.rs
@@ -1,139 +0,0 @@
|
||||
use strafesnet_common::integer::{Ratio64,Ratio64Vec2};
|
||||
#[derive(Clone)]
|
||||
struct Ratio{
|
||||
ratio:f64,
|
||||
}
|
||||
#[derive(Clone)]
|
||||
enum DerivedFov{
|
||||
FromScreenAspect,
|
||||
FromAspect(Ratio),
|
||||
}
|
||||
#[derive(Clone)]
|
||||
enum Fov{
|
||||
Exactly{x:f64,y:f64},
|
||||
SpecifyXDeriveY{x:f64,y:DerivedFov},
|
||||
SpecifyYDeriveX{x:DerivedFov,y:f64},
|
||||
}
|
||||
impl Default for Fov{
|
||||
fn default()->Self{
|
||||
Fov::SpecifyYDeriveX{x:DerivedFov::FromScreenAspect,y:1.0}
|
||||
}
|
||||
}
|
||||
#[derive(Clone)]
|
||||
enum DerivedSensitivity{
|
||||
FromRatio(Ratio64),
|
||||
}
|
||||
#[derive(Clone)]
|
||||
enum Sensitivity{
|
||||
Exactly{x:Ratio64,y:Ratio64},
|
||||
SpecifyXDeriveY{x:Ratio64,y:DerivedSensitivity},
|
||||
SpecifyYDeriveX{x:DerivedSensitivity,y:Ratio64},
|
||||
}
|
||||
impl Default for Sensitivity{
|
||||
fn default()->Self{
|
||||
Sensitivity::SpecifyXDeriveY{x:Ratio64::ONE*524288,y:DerivedSensitivity::FromRatio(Ratio64::ONE)}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default,Clone)]
|
||||
pub struct UserSettings{
|
||||
fov:Fov,
|
||||
sensitivity:Sensitivity,
|
||||
}
|
||||
impl UserSettings{
|
||||
pub fn calculate_fov(&self,zoom:f64,screen_size:&glam::UVec2)->glam::DVec2{
|
||||
zoom*match &self.fov{
|
||||
&Fov::Exactly{x,y}=>glam::dvec2(x,y),
|
||||
Fov::SpecifyXDeriveY{x,y}=>match y{
|
||||
DerivedFov::FromScreenAspect=>glam::dvec2(*x,x*(screen_size.y as f64/screen_size.x as f64)),
|
||||
DerivedFov::FromAspect(ratio)=>glam::dvec2(*x,x*ratio.ratio),
|
||||
},
|
||||
Fov::SpecifyYDeriveX{x,y}=>match x{
|
||||
DerivedFov::FromScreenAspect=>glam::dvec2(y*(screen_size.x as f64/screen_size.y as f64),*y),
|
||||
DerivedFov::FromAspect(ratio)=>glam::dvec2(y*ratio.ratio,*y),
|
||||
},
|
||||
}
|
||||
}
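//worked example (illustrative numbers, not a shipped default beyond the Default impl above):
//with Fov::SpecifyYDeriveX{x:FromScreenAspect,y:1.0}, zoom=1.0 and a 1920x1080 screen,
//this returns glam::dvec2(1.0*1920.0/1080.0, 1.0) ≈ (1.778, 1.0).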
|
||||
pub fn calculate_sensitivity(&self)->Ratio64Vec2{
|
||||
match &self.sensitivity{
|
||||
Sensitivity::Exactly{x,y}=>Ratio64Vec2::new(x.clone(),y.clone()),
|
||||
Sensitivity::SpecifyXDeriveY{x,y}=>match y{
|
||||
DerivedSensitivity::FromRatio(ratio)=>Ratio64Vec2::new(x.clone(),x.mul_ref(ratio)),
|
||||
}
|
||||
Sensitivity::SpecifyYDeriveX{x,y}=>match x{
|
||||
DerivedSensitivity::FromRatio(ratio)=>Ratio64Vec2::new(y.mul_ref(ratio),y.clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
//sensitivity is raw input dots (i.e. dpi = dots per inch) to radians conversion factor
sensitivity_x=0.001
sensitivity_y_from_x_ratio=1
Sensitivity::SpecifyXDeriveY{x:0.001,y:DerivedSensitivity::FromRatio(1.0)}
|
||||
*/
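//a minimal settings.conf sketch matching the keys read below (values are illustrative,
//copied from the example config at the bottom of this diff, not authoritative defaults):
//	[camera]
//	fov_y=1.0
//	#fov_x_from_y_ratio=1.33333333333333333333333333333333
//	sensitivity_x=98384
//	#sensitivity_y_from_x_ratio=1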
|
||||
|
||||
pub fn read_user_settings()->UserSettings{
|
||||
let mut cfg=configparser::ini::Ini::new();
|
||||
if let Ok(_)=cfg.load("settings.conf"){
|
||||
let (cfg_fov_x,cfg_fov_y)=(cfg.getfloat("camera","fov_x"),cfg.getfloat("camera","fov_y"));
|
||||
let fov=match(cfg_fov_x,cfg_fov_y){
|
||||
(Ok(Some(fov_x)),Ok(Some(fov_y)))=>Fov::Exactly {
|
||||
x:fov_x,
|
||||
y:fov_y
|
||||
},
|
||||
(Ok(Some(fov_x)),Ok(None))=>Fov::SpecifyXDeriveY{
|
||||
x:fov_x,
|
||||
y:if let Ok(Some(fov_y_from_x_ratio))=cfg.getfloat("camera","fov_y_from_x_ratio"){
|
||||
DerivedFov::FromAspect(Ratio{ratio:fov_y_from_x_ratio})
|
||||
}else{
|
||||
DerivedFov::FromScreenAspect
|
||||
}
|
||||
},
|
||||
(Ok(None),Ok(Some(fov_y)))=>Fov::SpecifyYDeriveX{
|
||||
x:if let Ok(Some(fov_x_from_y_ratio))=cfg.getfloat("camera","fov_x_from_y_ratio"){
|
||||
DerivedFov::FromAspect(Ratio{ratio:fov_x_from_y_ratio})
|
||||
}else{
|
||||
DerivedFov::FromScreenAspect
|
||||
},
|
||||
y:fov_y,
|
||||
},
|
||||
_=>{
|
||||
Fov::default()
|
||||
},
|
||||
};
|
||||
let (cfg_sensitivity_x,cfg_sensitivity_y)=(cfg.getfloat("camera","sensitivity_x"),cfg.getfloat("camera","sensitivity_y"));
|
||||
let sensitivity=match(cfg_sensitivity_x,cfg_sensitivity_y){
|
||||
(Ok(Some(sensitivity_x)),Ok(Some(sensitivity_y)))=>Sensitivity::Exactly {
|
||||
x:Ratio64::try_from(sensitivity_x).unwrap(),
|
||||
y:Ratio64::try_from(sensitivity_y).unwrap(),
|
||||
},
|
||||
(Ok(Some(sensitivity_x)),Ok(None))=>Sensitivity::SpecifyXDeriveY{
|
||||
x:Ratio64::try_from(sensitivity_x).unwrap(),
|
||||
y:if let Ok(Some(sensitivity_y_from_x_ratio))=cfg.getfloat("camera","sensitivity_y_from_x_ratio"){
|
||||
DerivedSensitivity::FromRatio(Ratio64::try_from(sensitivity_y_from_x_ratio).unwrap())
|
||||
}else{
|
||||
DerivedSensitivity::FromRatio(Ratio64::ONE)
|
||||
},
|
||||
},
|
||||
(Ok(None),Ok(Some(sensitivity_y)))=>Sensitivity::SpecifyYDeriveX{
|
||||
x:if let Ok(Some(sensitivity_x_from_y_ratio))=cfg.getfloat("camera","sensitivity_x_from_y_ratio"){
|
||||
DerivedSensitivity::FromRatio(Ratio64::try_from(sensitivity_x_from_y_ratio).unwrap())
|
||||
}else{
|
||||
DerivedSensitivity::FromRatio(Ratio64::ONE)
|
||||
},
|
||||
y:Ratio64::try_from(sensitivity_y).unwrap(),
|
||||
},
|
||||
_=>{
|
||||
Sensitivity::default()
|
||||
},
|
||||
};
|
||||
UserSettings{
|
||||
fov,
|
||||
sensitivity,
|
||||
}
|
||||
}else{
|
||||
UserSettings::default()
|
||||
}
|
||||
}
|
src/setup.rs
@@ -1,292 +0,0 @@
|
||||
use crate::window::WindowInstruction;
|
||||
use strafesnet_common::instruction::TimedInstruction;
|
||||
use strafesnet_common::integer;
|
||||
|
||||
fn optional_features()->wgpu::Features{
|
||||
wgpu::Features::TEXTURE_COMPRESSION_ASTC
|
||||
|wgpu::Features::TEXTURE_COMPRESSION_ETC2
|
||||
}
|
||||
fn required_features()->wgpu::Features{
|
||||
wgpu::Features::TEXTURE_COMPRESSION_BC
|
||||
}
|
||||
fn required_downlevel_capabilities()->wgpu::DownlevelCapabilities{
|
||||
wgpu::DownlevelCapabilities{
|
||||
flags:wgpu::DownlevelFlags::empty(),
|
||||
shader_model:wgpu::ShaderModel::Sm5,
|
||||
..wgpu::DownlevelCapabilities::default()
|
||||
}
|
||||
}
|
||||
pub fn required_limits()->wgpu::Limits{
|
||||
wgpu::Limits::default()
|
||||
}
|
||||
|
||||
struct SetupContextPartial1{
|
||||
backends:wgpu::Backends,
|
||||
instance:wgpu::Instance,
|
||||
}
|
||||
fn create_window(title:&str,event_loop:&winit::event_loop::EventLoop<()>)->Result<winit::window::Window,winit::error::OsError>{
|
||||
let mut attr=winit::window::WindowAttributes::default();
|
||||
attr=attr.with_title(title);
|
||||
#[cfg(windows_OFF)] // TODO
|
||||
{
|
||||
use winit::platform::windows::WindowBuilderExtWindows;
|
||||
builder=builder.with_no_redirection_bitmap(true);
|
||||
}
|
||||
event_loop.create_window(attr)
|
||||
}
|
||||
fn create_instance()->SetupContextPartial1{
|
||||
let backends=wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
|
||||
let dx12_shader_compiler=wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
|
||||
SetupContextPartial1{
|
||||
backends,
|
||||
instance:wgpu::Instance::new(wgpu::InstanceDescriptor{
|
||||
backends,
|
||||
dx12_shader_compiler,
|
||||
..Default::default()
|
||||
}),
|
||||
}
|
||||
}
|
||||
impl SetupContextPartial1{
|
||||
fn create_surface<'a>(self,window:&'a winit::window::Window)->Result<SetupContextPartial2<'a>,wgpu::CreateSurfaceError>{
|
||||
Ok(SetupContextPartial2{
|
||||
backends:self.backends,
|
||||
surface:self.instance.create_surface(window)?,
|
||||
instance:self.instance,
|
||||
})
|
||||
}
|
||||
}
|
||||
struct SetupContextPartial2<'a>{
|
||||
backends:wgpu::Backends,
|
||||
instance:wgpu::Instance,
|
||||
surface:wgpu::Surface<'a>,
|
||||
}
|
||||
impl<'a> SetupContextPartial2<'a>{
|
||||
fn pick_adapter(self)->SetupContextPartial3<'a>{
|
||||
let adapter;
|
||||
|
||||
//TODO: prefer adapter that implements optional features
|
||||
//let optional_features=optional_features();
|
||||
let required_features=required_features();
|
||||
|
||||
//no helper function smh gotta write it myself
|
||||
let adapters=self.instance.enumerate_adapters(self.backends);
|
||||
|
||||
let mut chosen_adapter=None;
|
||||
let mut chosen_adapter_score=0;
|
||||
for adapter in adapters {
|
||||
if !adapter.is_surface_supported(&self.surface) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let score=match adapter.get_info().device_type{
|
||||
wgpu::DeviceType::IntegratedGpu=>3,
|
||||
wgpu::DeviceType::DiscreteGpu=>4,
|
||||
wgpu::DeviceType::VirtualGpu=>2,
|
||||
wgpu::DeviceType::Other|wgpu::DeviceType::Cpu=>1,
|
||||
};
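//preference order implied by the scores above: DiscreteGpu > IntegratedGpu > VirtualGpu > Other/Cpu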
|
||||
|
||||
let adapter_features=adapter.features();
|
||||
if chosen_adapter_score<score&&adapter_features.contains(required_features) {
|
||||
chosen_adapter_score=score;
|
||||
chosen_adapter=Some(adapter);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(maybe_chosen_adapter)=chosen_adapter{
|
||||
adapter=maybe_chosen_adapter;
|
||||
}else{
|
||||
panic!("No suitable GPU adapters found on the system!");
|
||||
}
|
||||
|
||||
|
||||
let adapter_info=adapter.get_info();
|
||||
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
|
||||
|
||||
let required_downlevel_capabilities=required_downlevel_capabilities();
|
||||
let downlevel_capabilities=adapter.get_downlevel_capabilities();
|
||||
assert!(
|
||||
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
|
||||
"Adapter does not support the minimum shader model required to run this example: {:?}",
|
||||
required_downlevel_capabilities.shader_model
|
||||
);
|
||||
assert!(
|
||||
downlevel_capabilities
|
||||
.flags
|
||||
.contains(required_downlevel_capabilities.flags),
|
||||
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
|
||||
required_downlevel_capabilities.flags - downlevel_capabilities.flags
|
||||
);
|
||||
SetupContextPartial3{
|
||||
instance:self.instance,
|
||||
surface:self.surface,
|
||||
adapter,
|
||||
}
|
||||
}
|
||||
}
|
||||
struct SetupContextPartial3<'a>{
|
||||
instance:wgpu::Instance,
|
||||
surface:wgpu::Surface<'a>,
|
||||
adapter:wgpu::Adapter,
|
||||
}
|
||||
impl<'a> SetupContextPartial3<'a>{
|
||||
fn request_device(self)->SetupContextPartial4<'a>{
|
||||
let optional_features=optional_features();
|
||||
let required_features=required_features();
|
||||
|
||||
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
|
||||
let needed_limits=required_limits().using_resolution(self.adapter.limits());
|
||||
|
||||
let trace_dir=std::env::var("WGPU_TRACE");
|
||||
let (device, queue)=pollster::block_on(self.adapter
|
||||
.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: None,
|
||||
required_features: (optional_features & self.adapter.features()) | required_features,
|
||||
required_limits: needed_limits,
|
||||
memory_hints:wgpu::MemoryHints::Performance,
|
||||
},
|
||||
trace_dir.ok().as_ref().map(std::path::Path::new),
|
||||
))
|
||||
.expect("Unable to find a suitable GPU adapter!");
|
||||
|
||||
SetupContextPartial4{
|
||||
instance:self.instance,
|
||||
surface:self.surface,
|
||||
adapter:self.adapter,
|
||||
device,
|
||||
queue,
|
||||
}
|
||||
}
|
||||
}
|
||||
struct SetupContextPartial4<'a>{
|
||||
instance:wgpu::Instance,
|
||||
surface:wgpu::Surface<'a>,
|
||||
adapter:wgpu::Adapter,
|
||||
device:wgpu::Device,
|
||||
queue:wgpu::Queue,
|
||||
}
|
||||
impl<'a> SetupContextPartial4<'a>{
|
||||
fn configure_surface(self,size:&'a winit::dpi::PhysicalSize<u32>)->SetupContext<'a>{
|
||||
let mut config=self.surface
|
||||
.get_default_config(&self.adapter, size.width, size.height)
|
||||
.expect("Surface isn't supported by the adapter.");
|
||||
let surface_view_format=config.format.add_srgb_suffix();
|
||||
config.view_formats.push(surface_view_format);
|
||||
config.present_mode=wgpu::PresentMode::AutoNoVsync;
|
||||
self.surface.configure(&self.device, &config);
|
||||
|
||||
SetupContext{
|
||||
instance:self.instance,
|
||||
surface:self.surface,
|
||||
device:self.device,
|
||||
queue:self.queue,
|
||||
config,
|
||||
}
|
||||
}
|
||||
}
|
||||
pub struct SetupContext<'a>{
|
||||
pub instance:wgpu::Instance,
|
||||
pub surface:wgpu::Surface<'a>,
|
||||
pub device:wgpu::Device,
|
||||
pub queue:wgpu::Queue,
|
||||
pub config:wgpu::SurfaceConfiguration,
|
||||
}
|
||||
|
||||
pub fn setup_and_start(title:String){
|
||||
let event_loop=winit::event_loop::EventLoop::new().unwrap();
|
||||
|
||||
println!("Initializing the surface...");
|
||||
|
||||
let partial_1=create_instance();
|
||||
|
||||
let window=create_window(title.as_str(),&event_loop).unwrap();
|
||||
|
||||
let partial_2=partial_1.create_surface(&window).unwrap();
|
||||
|
||||
let partial_3=partial_2.pick_adapter();
|
||||
|
||||
let partial_4=partial_3.request_device();
|
||||
|
||||
let size=window.inner_size();
|
||||
|
||||
let setup_context=partial_4.configure_surface(&size);
|
||||
|
||||
//a dedicated thread just to ping RequestRedraw back and resize the window doesn't seem logical
|
||||
|
||||
//the thread that spawns the physics thread
|
||||
let mut window_thread=crate::window::worker(
|
||||
&window,
|
||||
setup_context,
|
||||
);
|
||||
|
||||
if let Some(arg)=std::env::args().nth(1){
|
||||
let path=std::path::PathBuf::from(arg);
|
||||
window_thread.send(TimedInstruction{
|
||||
time:integer::Time::ZERO,
|
||||
instruction:WindowInstruction::WindowEvent(winit::event::WindowEvent::DroppedFile(path)),
|
||||
}).unwrap();
|
||||
};
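//e.g. `strafe-client bhop_maps/5692113331.snfm` (see the run scripts at the end of this diff)
//loads that map at startup by synthesizing a DroppedFile window event.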
|
||||
|
||||
println!("Entering event loop...");
|
||||
let root_time=std::time::Instant::now();
|
||||
run_event_loop(event_loop,window_thread,root_time).unwrap();
|
||||
}
|
||||
|
||||
fn run_event_loop(
|
||||
event_loop:winit::event_loop::EventLoop<()>,
|
||||
mut window_thread:crate::compat_worker::QNWorker<TimedInstruction<WindowInstruction>>,
|
||||
root_time:std::time::Instant
|
||||
)->Result<(),winit::error::EventLoopError>{
|
||||
event_loop.run(move |event,elwt|{
|
||||
let time=integer::Time::from_nanos(root_time.elapsed().as_nanos() as i64);
|
||||
// *control_flow=if cfg!(feature="metal-auto-capture"){
|
||||
// winit::event_loop::ControlFlow::Exit
|
||||
// }else{
|
||||
// winit::event_loop::ControlFlow::Poll
|
||||
// };
|
||||
match event{
|
||||
winit::event::Event::AboutToWait=>{
|
||||
window_thread.send(TimedInstruction{time,instruction:WindowInstruction::RequestRedraw}).unwrap();
|
||||
}
|
||||
winit::event::Event::WindowEvent {
|
||||
event:
|
||||
// WindowEvent::Resized(size)
|
||||
// | WindowEvent::ScaleFactorChanged {
|
||||
// new_inner_size: &mut size,
|
||||
// ..
|
||||
// },
|
||||
winit::event::WindowEvent::Resized(size),//ignoring scale factor changed for now because mutex bruh
|
||||
window_id:_,
|
||||
} => {
|
||||
window_thread.send(TimedInstruction{time,instruction:WindowInstruction::Resize(size)}).unwrap();
|
||||
}
|
||||
winit::event::Event::WindowEvent{event,..}=>match event{
|
||||
winit::event::WindowEvent::KeyboardInput{
|
||||
event:
|
||||
winit::event::KeyEvent {
|
||||
logical_key: winit::keyboard::Key::Named(winit::keyboard::NamedKey::Escape),
|
||||
state: winit::event::ElementState::Pressed,
|
||||
..
|
||||
},
|
||||
..
|
||||
}
|
||||
|winit::event::WindowEvent::CloseRequested=>{
|
||||
elwt.exit();
|
||||
}
|
||||
winit::event::WindowEvent::RedrawRequested=>{
|
||||
window_thread.send(TimedInstruction{time,instruction:WindowInstruction::Render}).unwrap();
|
||||
}
|
||||
_=>{
|
||||
window_thread.send(TimedInstruction{time,instruction:WindowInstruction::WindowEvent(event)}).unwrap();
|
||||
}
|
||||
},
|
||||
winit::event::Event::DeviceEvent{
|
||||
event,
|
||||
..
|
||||
} => {
|
||||
window_thread.send(TimedInstruction{time,instruction:WindowInstruction::DeviceEvent(event)}).unwrap();
|
||||
},
|
||||
_=>{}
|
||||
}
|
||||
})
|
||||
}
|
src/shader.wgsl
@@ -1,23 +1,21 @@
|
||||
struct Camera {
|
||||
struct SkyOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(0) sampledir: vec3<f32>,
|
||||
};
|
||||
|
||||
struct Data {
|
||||
// from camera to screen
|
||||
proj: mat4x4<f32>,
|
||||
// from screen to camera
|
||||
proj_inv: mat4x4<f32>,
|
||||
// from world to camera
|
||||
view: mat4x4<f32>,
|
||||
// from camera to world
|
||||
view_inv: mat4x4<f32>,
|
||||
// camera position
|
||||
cam_pos: vec4<f32>,
|
||||
};
|
||||
|
||||
//group 0 is the camera
|
||||
@group(0)
|
||||
@binding(0)
|
||||
var<uniform> camera: Camera;
|
||||
|
||||
struct SkyOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(0) sampledir: vec3<f32>,
|
||||
};
|
||||
var<uniform> r_data: Data;
|
||||
|
||||
@vertex
|
||||
fn vs_sky(@builtin(vertex_index) vertex_index: u32) -> SkyOutput {
|
||||
@@ -31,8 +29,9 @@ fn vs_sky(@builtin(vertex_index) vertex_index: u32) -> SkyOutput {
|
||||
1.0
|
||||
);
|
||||
|
||||
let inv_model_view = mat3x3<f32>(camera.view_inv[0].xyz, camera.view_inv[1].xyz, camera.view_inv[2].xyz);
|
||||
let unprojected = camera.proj_inv * pos;
|
||||
// transposition = inversion for this orthonormal matrix
|
||||
let inv_model_view = transpose(mat3x3<f32>(r_data.view[0].xyz, r_data.view[1].xyz, r_data.view[2].xyz));
|
||||
let unprojected = r_data.proj_inv * pos;
|
||||
|
||||
var result: SkyOutput;
|
||||
result.sampledir = inv_model_view * unprojected.xyz;
|
||||
@@ -40,73 +39,150 @@ fn vs_sky(@builtin(vertex_index) vertex_index: u32) -> SkyOutput {
|
||||
return result;
|
||||
}
|
||||
|
||||
struct ModelInstance{
|
||||
transform:mat4x4<f32>,
|
||||
normal_transform:mat3x3<f32>,
|
||||
color:vec4<f32>,
|
||||
}
|
||||
//my fancy idea is to create a megatexture for each model that includes all the textures each instance will need
|
||||
//the texture transform then maps the texture coordinates to the location of the specific texture
|
||||
//group 1 is the model
|
||||
const MAX_MODEL_INSTANCES=512;
|
||||
@group(2)
|
||||
@binding(0)
|
||||
var<uniform> model_instances: array<ModelInstance, MAX_MODEL_INSTANCES>;
|
||||
@group(2)
|
||||
@binding(1)
|
||||
var model_texture: texture_2d<f32>;
|
||||
@group(2)
|
||||
@binding(2)
|
||||
var model_sampler: sampler;
|
||||
struct GroundOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(4) pos: vec3<f32>,
|
||||
};
|
||||
|
||||
struct EntityOutputTexture {
|
||||
@vertex
|
||||
fn vs_ground(@builtin(vertex_index) vertex_index: u32) -> GroundOutput {
|
||||
// hacky way to draw two triangles that make a square
|
||||
let tmp1 = i32(vertex_index)/2-i32(vertex_index)/3;
|
||||
let tmp2 = i32(vertex_index)&1;
|
||||
let pos = vec3<f32>(
|
||||
f32(tmp1) * 2.0 - 1.0,
|
||||
0.0,
|
||||
f32(tmp2) * 2.0 - 1.0
|
||||
) * 160.0;
|
||||
|
||||
var result: GroundOutput;
|
||||
result.pos = pos;
|
||||
result.position = r_data.proj * r_data.view * vec4<f32>(pos, 1.0);
|
||||
return result;
|
||||
}
|
||||
|
||||
struct EntityOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(1) texture: vec2<f32>,
|
||||
@location(2) normal: vec3<f32>,
|
||||
@location(3) view: vec3<f32>,
|
||||
@location(4) color: vec4<f32>,
|
||||
@location(5) @interpolate(flat) model_color: vec4<f32>,
|
||||
};
|
||||
|
||||
struct TransformData {
|
||||
transform: mat4x4<f32>,
|
||||
depth: mat4x4<f32>,
|
||||
use_depth: vec4<f32>,
|
||||
};
|
||||
|
||||
@group(1)
|
||||
@binding(0)
|
||||
var<uniform> transform: TransformData;
|
||||
|
||||
@vertex
|
||||
fn vs_entity_texture(
|
||||
@builtin(instance_index) instance: u32,
|
||||
fn vs_entity(
|
||||
@location(0) pos: vec3<f32>,
|
||||
@location(1) texture: vec2<f32>,
|
||||
@location(2) normal: vec3<f32>,
|
||||
@location(3) color: vec4<f32>,
|
||||
) -> EntityOutputTexture {
|
||||
var position: vec4<f32> = model_instances[instance].transform * vec4<f32>(pos, 1.0);
|
||||
var result: EntityOutputTexture;
|
||||
result.normal = model_instances[instance].normal_transform * normal;
|
||||
) -> EntityOutput {
|
||||
var position_depth: vec4<f32> = transform.depth * vec4<f32>(pos, 1.0);
|
||||
var position_depth_0: vec4<f32> = position_depth;
|
||||
position_depth_0.z=0.0;
|
||||
var position: vec4<f32> = transform.transform * mix(position_depth,position_depth_0,transform.use_depth);
|
||||
|
||||
var result: EntityOutput;
|
||||
result.normal = (transform.transform * mix(vec4<f32>(normal,0.0),vec4<f32>(0.0,0.0,1.0,0.0),transform.use_depth.z)).xyz;
|
||||
result.texture=texture;
|
||||
result.color = color;
|
||||
result.model_color = model_instances[instance].color;
|
||||
result.view = position.xyz - camera.view_inv[3].xyz;//col(3)
|
||||
result.position = camera.proj * camera.view * position;
|
||||
result.view = position.xyz - r_data.cam_pos.xyz;
|
||||
var screen_position: vec4<f32> = r_data.proj * r_data.view * position;
|
||||
result.position = mix(screen_position,position_depth,transform.use_depth);
|
||||
return result;
|
||||
}
|
||||
|
||||
//group 2 is the skybox texture
|
||||
@group(1)
|
||||
@binding(0)
|
||||
var cube_texture: texture_cube<f32>;
|
||||
@group(1)
|
||||
struct SquareOutput {
|
||||
@builtin(position) position: vec4<f32>,
|
||||
@location(2) normal: vec3<f32>,
|
||||
@location(3) view: vec3<f32>,
|
||||
@location(4) pos: vec3<f32>,
|
||||
};
|
||||
@vertex
|
||||
fn vs_square(@builtin(vertex_index) vertex_index: u32) -> SquareOutput {
|
||||
// hacky way to draw two triangles that make a square
|
||||
let tmp1 = i32(vertex_index)/2-i32(vertex_index)/3;
|
||||
let tmp2 = i32(vertex_index)&1;
|
||||
let pos = vec3<f32>(
|
||||
(f32(tmp1) - 0.5)*1.8,
|
||||
f32(tmp2) - 0.5,
|
||||
0.0
|
||||
);
|
||||
|
||||
var result: SquareOutput;
|
||||
result.normal = vec3<f32>(0.0,0.0,1.0);
|
||||
result.pos = (transform.transform * vec4<f32>(pos, 1.0)).xyz;
|
||||
result.view = result.pos - r_data.cam_pos.xyz;
|
||||
result.position = r_data.proj * r_data.view * transform.transform * vec4<f32>(pos, 1.0);
|
||||
return result;
|
||||
}
|
||||
|
||||
@group(0)
|
||||
@binding(1)
|
||||
var cube_sampler: sampler;
|
||||
var r_texture: texture_cube<f32>;
|
||||
@group(0)
|
||||
@binding(2)
|
||||
var r_sampler: sampler;
|
||||
|
||||
@fragment
|
||||
fn fs_sky(vertex: SkyOutput) -> @location(0) vec4<f32> {
|
||||
return textureSample(cube_texture, cube_sampler, vertex.sampledir);
|
||||
return textureSample(r_texture, r_sampler, vertex.sampledir);
|
||||
}
|
||||
|
||||
@fragment
|
||||
fn fs_entity_texture(vertex: EntityOutputTexture) -> @location(0) vec4<f32> {
|
||||
fn fs_entity(vertex: EntityOutput) -> @location(0) vec4<f32> {
|
||||
let incident = normalize(vertex.view);
|
||||
let normal = normalize(vertex.normal);
|
||||
let d = dot(normal, incident);
|
||||
let reflected = incident - 2.0 * d * normal;
|
||||
|
||||
let fragment_color = textureSample(model_texture, model_sampler, vertex.texture)*vertex.color;
|
||||
let reflected_color = textureSample(cube_texture, cube_sampler, reflected).rgb;
|
||||
return mix(vec4<f32>(vec3<f32>(0.05) + 0.2 * reflected_color,1.0),mix(vertex.model_color,vec4<f32>(fragment_color.rgb,1.0),fragment_color.a),0.5+0.5*abs(d));
|
||||
let dir = vec3<f32>(-1.0)+2.0*vec3<f32>(vertex.texture.x,0.0,vertex.texture.y);
|
||||
let texture_color = textureSample(r_texture, r_sampler, dir).rgb;
|
||||
let reflected_color = textureSample(r_texture, r_sampler, reflected).rgb;
|
||||
return vec4<f32>(mix(vec3<f32>(0.1) + 0.5 * reflected_color,texture_color,1.0-pow(1.0-abs(d),2.0)), 1.0);
|
||||
}
|
||||
|
||||
fn modulo_euclidean (a: f32, b: f32) -> f32 {
|
||||
var m = a % b;
|
||||
if (m < 0.0) {
|
||||
if (b < 0.0) {
|
||||
m -= b;
|
||||
} else {
|
||||
m += b;
|
||||
}
|
||||
}
|
||||
return m;
|
||||
}
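//e.g. modulo_euclidean(-0.5, 2.0) == 1.5, whereas the plain % operator would yield -0.5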
|
||||
|
||||
@fragment
|
||||
fn fs_ground(vertex: GroundOutput) -> @location(0) vec4<f32> {
|
||||
let dir = vec3<f32>(-1.0)+vec3<f32>(modulo_euclidean(vertex.pos.x/16.,1.0),0.0,modulo_euclidean(vertex.pos.z/16.,1.0))*2.0;
|
||||
return vec4<f32>(textureSample(r_texture, r_sampler, dir).rgb, 1.0);
|
||||
}
|
||||
|
||||
@fragment
|
||||
fn fs_checkered(vertex: SquareOutput) -> @location(0) vec4<f32> {
|
||||
let voxel_parity: f32 = f32(
|
||||
u32(modulo_euclidean(vertex.pos.x,2.0)<1.0)
|
||||
^ u32(modulo_euclidean(vertex.pos.y,2.0)<1.0)
|
||||
//^ u32(modulo_euclidean(vertex.pos.z,2.0)<1.0)
|
||||
);
|
||||
|
||||
let incident = normalize(vertex.view);
|
||||
let normal = normalize(vertex.normal);
|
||||
let d = dot(normal, incident);
|
||||
let reflected = incident - 2.0 * d * normal;
|
||||
|
||||
let texture_color = vec3<f32>(1.0)*voxel_parity;
|
||||
let reflected_color = textureSample(r_texture, r_sampler, reflected).rgb;
|
||||
return vec4<f32>(mix(vec3<f32>(0.1) + 0.5 * reflected_color,texture_color,1.0-pow(1.0-abs(d),2.0)), 1.0);
|
||||
}
|
||||
|
||||
@fragment
|
||||
fn fs_overwrite(vertex: SquareOutput) {}
|
src/window.rs
@@ -1,223 +0,0 @@
|
||||
use crate::physics_worker::InputInstruction;
|
||||
use strafesnet_common::integer;
|
||||
use strafesnet_common::instruction::TimedInstruction;
|
||||
|
||||
pub enum WindowInstruction{
|
||||
Resize(winit::dpi::PhysicalSize<u32>),
|
||||
WindowEvent(winit::event::WindowEvent),
|
||||
DeviceEvent(winit::event::DeviceEvent),
|
||||
RequestRedraw,
|
||||
Render,
|
||||
}
|
||||
|
||||
//holds thread handles to dispatch to
|
||||
struct WindowContext<'a>{
|
||||
manual_mouse_lock:bool,
|
||||
mouse:strafesnet_common::mouse::MouseState,//std::sync::Arc<std::sync::Mutex<>>
|
||||
screen_size:glam::UVec2,
|
||||
window:&'a winit::window::Window,
|
||||
physics_thread:crate::compat_worker::QNWorker<'a, TimedInstruction<crate::physics_worker::Instruction>>,
|
||||
}
|
||||
|
||||
impl WindowContext<'_>{
|
||||
fn get_middle_of_screen(&self)->winit::dpi::PhysicalPosition<f32>{
|
||||
winit::dpi::PhysicalPosition::new(self.screen_size.x as f32/2.0,self.screen_size.y as f32/2.0)
|
||||
}
|
||||
fn window_event(&mut self,time:integer::Time,event: winit::event::WindowEvent) {
|
||||
match event {
|
||||
winit::event::WindowEvent::DroppedFile(path)=>{
|
||||
match crate::file::load(path.as_path()){
|
||||
Ok(map)=>self.physics_thread.send(TimedInstruction{time,instruction:crate::physics_worker::Instruction::ChangeMap(map)}).unwrap(),
|
||||
Err(e)=>println!("Failed to load map: {e}"),
|
||||
}
|
||||
},
|
||||
winit::event::WindowEvent::Focused(state)=>{
|
||||
//pause unpause
|
||||
self.physics_thread.send(TimedInstruction{
|
||||
time,
|
||||
instruction:crate::physics_worker::Instruction::SetPaused(!state),
|
||||
}).unwrap();
|
||||
//recalculate pressed keys on focus
|
||||
},
|
||||
winit::event::WindowEvent::KeyboardInput{
|
||||
event:winit::event::KeyEvent{state,logical_key,repeat:false,..},
|
||||
..
|
||||
}=>{
|
||||
let s=match state{
|
||||
winit::event::ElementState::Pressed=>true,
|
||||
winit::event::ElementState::Released=>false,
|
||||
};
|
||||
match logical_key{
|
||||
winit::keyboard::Key::Named(winit::keyboard::NamedKey::Tab)=>{
|
||||
if s{
|
||||
self.manual_mouse_lock=false;
|
||||
match self.window.set_cursor_position(self.get_middle_of_screen()){
|
||||
Ok(())=>(),
|
||||
Err(e)=>println!("Could not set cursor position: {:?}",e),
|
||||
}
|
||||
match self.window.set_cursor_grab(winit::window::CursorGrabMode::None){
|
||||
Ok(())=>(),
|
||||
Err(e)=>println!("Could not release cursor: {:?}",e),
|
||||
}
|
||||
}else{
|
||||
//if cursor is outside window don't lock but apparently there's no get pos function
|
||||
//let pos=window.get_cursor_pos();
|
||||
match self.window.set_cursor_grab(winit::window::CursorGrabMode::Locked){
|
||||
Ok(())=>(),
|
||||
Err(_)=>{
|
||||
match self.window.set_cursor_grab(winit::window::CursorGrabMode::Confined){
|
||||
Ok(())=>(),
|
||||
Err(e)=>{
|
||||
self.manual_mouse_lock=true;
|
||||
println!("Could not confine cursor: {:?}",e)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
self.window.set_cursor_visible(s);
|
||||
},
|
||||
winit::keyboard::Key::Named(winit::keyboard::NamedKey::F11)=>{
|
||||
if s{
|
||||
if self.window.fullscreen().is_some(){
|
||||
self.window.set_fullscreen(None);
|
||||
}else{
|
||||
self.window.set_fullscreen(Some(winit::window::Fullscreen::Borderless(None)));
|
||||
}
|
||||
}
|
||||
},
|
||||
winit::keyboard::Key::Named(winit::keyboard::NamedKey::Escape)=>{
|
||||
if s{
|
||||
self.manual_mouse_lock=false;
|
||||
match self.window.set_cursor_grab(winit::window::CursorGrabMode::None){
|
||||
Ok(())=>(),
|
||||
Err(e)=>println!("Could not release cursor: {:?}",e),
|
||||
}
|
||||
self.window.set_cursor_visible(true);
|
||||
}
|
||||
},
|
||||
keycode=>{
|
||||
if let Some(input_instruction)=match keycode{
|
||||
winit::keyboard::Key::Named(winit::keyboard::NamedKey::Space)=>Some(InputInstruction::Jump(s)),
|
||||
winit::keyboard::Key::Character(key)=>match key.as_str(){
|
||||
"w"|"W"=>Some(InputInstruction::MoveForward(s)),
|
||||
"a"|"A"=>Some(InputInstruction::MoveLeft(s)),
|
||||
"s"|"S"=>Some(InputInstruction::MoveBack(s)),
|
||||
"d"|"D"=>Some(InputInstruction::MoveRight(s)),
|
||||
"e"|"E"=>Some(InputInstruction::MoveUp(s)),
|
||||
"q"|"Q"=>Some(InputInstruction::MoveDown(s)),
|
||||
"z"|"Z"=>Some(InputInstruction::Zoom(s)),
|
||||
"r"|"R"=>if s{
|
||||
//mouse needs to be reset since the position is absolute
|
||||
self.mouse=strafesnet_common::mouse::MouseState::default();
|
||||
Some(InputInstruction::ResetAndRestart)
|
||||
}else{None},
|
||||
"f"|"F"=>if s{Some(InputInstruction::PracticeFly)}else{None},
|
||||
_=>None,
|
||||
},
|
||||
_=>None,
|
||||
}{
|
||||
self.physics_thread.send(TimedInstruction{
|
||||
time,
|
||||
instruction:crate::physics_worker::Instruction::Input(input_instruction),
|
||||
}).unwrap();
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
_=>(),
|
||||
}
|
||||
}
|
||||
|
||||
fn device_event(&mut self,time:integer::Time,event: winit::event::DeviceEvent) {
|
||||
match event {
|
||||
winit::event::DeviceEvent::MouseMotion {
|
||||
delta,//these (f64,f64) are integers on my machine
|
||||
} => {
|
||||
if self.manual_mouse_lock{
|
||||
match self.window.set_cursor_position(self.get_middle_of_screen()){
|
||||
Ok(())=>(),
|
||||
Err(e)=>println!("Could not set cursor position: {:?}",e),
|
||||
}
|
||||
}
|
||||
//do not step the physics because the mouse polling rate is higher than the physics can run.
|
||||
//essentially the previous input will be overwritten until a true step runs
|
||||
//which is fine because they run all the time.
|
||||
let delta=glam::ivec2(delta.0 as i32,delta.1 as i32);
|
||||
self.mouse.pos+=delta;
|
||||
self.physics_thread.send(TimedInstruction{
|
||||
time,
|
||||
instruction:crate::physics_worker::Instruction::Input(InputInstruction::MoveMouse(self.mouse.pos)),
|
||||
}).unwrap();
|
||||
},
|
||||
winit::event::DeviceEvent::MouseWheel {
|
||||
delta,
|
||||
} => {
|
||||
println!("mousewheel {:?}",delta);
|
||||
if false{//self.physics.style.use_scroll{
|
||||
self.physics_thread.send(TimedInstruction{
|
||||
time,
|
||||
instruction:crate::physics_worker::Instruction::Input(InputInstruction::Jump(true)),//activates the immediate jump path, but the style modifier prevents controls&CONTROL_JUMP bit from being set to auto jump
|
||||
}).unwrap();
|
||||
}
|
||||
}
|
||||
_=>(),
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn worker<'a>(
|
||||
window:&'a winit::window::Window,
|
||||
setup_context:crate::setup::SetupContext<'a>,
|
||||
)->crate::compat_worker::QNWorker<'a,TimedInstruction<WindowInstruction>>{
|
||||
// WindowContextSetup::new
|
||||
let user_settings=crate::settings::read_user_settings();
|
||||
|
||||
let mut graphics=crate::graphics::GraphicsState::new(&setup_context.device,&setup_context.queue,&setup_context.config);
|
||||
graphics.load_user_settings(&user_settings);
|
||||
|
||||
//WindowContextSetup::into_context
|
||||
let screen_size=glam::uvec2(setup_context.config.width,setup_context.config.height);
|
||||
let graphics_thread=crate::graphics_worker::new(graphics,setup_context.config,setup_context.surface,setup_context.device,setup_context.queue);
|
||||
let mut window_context=WindowContext{
|
||||
manual_mouse_lock:false,
|
||||
mouse:strafesnet_common::mouse::MouseState::default(),
|
||||
//make sure to update this!!!!!
|
||||
screen_size,
|
||||
window,
|
||||
physics_thread:crate::physics_worker::new(
|
||||
graphics_thread,
|
||||
user_settings,
|
||||
),
|
||||
};
|
||||
|
||||
//WindowContextSetup::into_worker
|
||||
crate::compat_worker::QNWorker::new(move |ins:TimedInstruction<WindowInstruction>|{
|
||||
match ins.instruction{
|
||||
WindowInstruction::RequestRedraw=>{
|
||||
window_context.window.request_redraw();
|
||||
}
|
||||
WindowInstruction::WindowEvent(window_event)=>{
|
||||
window_context.window_event(ins.time,window_event);
|
||||
},
|
||||
WindowInstruction::DeviceEvent(device_event)=>{
|
||||
window_context.device_event(ins.time,device_event);
|
||||
},
|
||||
WindowInstruction::Resize(size)=>{
|
||||
window_context.physics_thread.send(
|
||||
TimedInstruction{
|
||||
time:ins.time,
|
||||
instruction:crate::physics_worker::Instruction::Resize(size)
|
||||
}
|
||||
).unwrap();
|
||||
}
|
||||
WindowInstruction::Render=>{
|
||||
window_context.physics_thread.send(
|
||||
TimedInstruction{
|
||||
time:ins.time,
|
||||
instruction:crate::physics_worker::Instruction::Render
|
||||
}
|
||||
).unwrap();
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
src/worker.rs
@@ -1,216 +0,0 @@
|
||||
use std::thread;
|
||||
use std::sync::{mpsc,Arc};
|
||||
use parking_lot::Mutex;
|
||||
|
||||
//WorkerPool
|
||||
struct Pool(u32);
|
||||
enum PoolOrdering{
|
||||
Single,//single thread cannot get out of order
|
||||
Ordered(u32),//order matters and should be buffered/dropped according to ControlFlow
|
||||
Unordered(u32),//order does not matter
|
||||
}
|
||||
//WorkerInput
|
||||
enum Input{
|
||||
//no input, workers have everything needed at creation
|
||||
None,
|
||||
//Immediate input to any available worker, dropped if they are overflowing (all workers are busy)
|
||||
Immediate,
|
||||
//Queued input is ordered, but serial jobs that mutate state (such as running physics) can only be done with a single worker
|
||||
Queued,//"Fifo"
|
||||
//Query a function to get next input when a thread becomes available
|
||||
//worker stops querying when Query function returns None and dies after all threads complete
|
||||
//lifetimes sound crazy on this one
|
||||
Query,
|
||||
//Queue of length one, the input is replaced if it is submitted twice before the current work finishes
|
||||
Mailbox,
|
||||
}
|
||||
//WorkerOutput
|
||||
enum Output{
|
||||
None(Pool),
|
||||
Realtime(PoolOrdering),//outputs are dropped if they are out of order and order is demanded
|
||||
Buffered(PoolOrdering),//outputs are held back internally if they are out of order and order is demanded
|
||||
}
|
||||
|
||||
//It would be possible to implement all variants
|
||||
//with a query input function and callback output function but I'm not sure if that's worth it.
|
||||
//Immediate = Condvar
|
||||
//Queued = receiver.recv()
|
||||
//a callback function would need to use an async runtime!
|
||||
|
||||
//realtime output is an arc mutex of the output value that is assigned every time a worker completes a job
|
||||
//buffered output produces a receiver object that can be passed to the creation of another worker
|
||||
//when ordering is requested, output is ordered by the order each thread is run
|
||||
//which is the same as the order that the input data is processed except for Input::None which has no input data
|
||||
//WorkerDescription
|
||||
struct Description{
|
||||
input:Input,
|
||||
output:Output,
|
||||
}
|
||||
|
||||
//The goal here is to have a worker thread that parks itself when it runs out of work.
|
||||
//The worker thread publishes the result of its work back to the worker object for every item in the work queue.
|
||||
//Previous values do not matter as soon as a new value is produced, which is why it's called "Realtime"
|
||||
//The physics (target use case) knows when it has not changed the body, so not updating the value is also an option.
|
||||
|
||||
/*
|
||||
QR = WorkerDescription{
|
||||
input:Queued,
|
||||
output:Realtime(Single),
|
||||
}
|
||||
*/
|
||||
pub struct QRWorker<Task:Send,Value:Clone>{
|
||||
sender: mpsc::Sender<Task>,
|
||||
value:Arc<Mutex<Value>>,
|
||||
}
|
||||
|
||||
impl<Task:Send+'static,Value:Clone+Send+'static> QRWorker<Task,Value>{
|
||||
pub fn new<F:FnMut(Task)->Value+Send+'static>(value:Value,mut f:F) -> Self {
|
||||
let (sender, receiver) = mpsc::channel::<Task>();
|
||||
let ret=Self {
|
||||
sender,
|
||||
value:Arc::new(Mutex::new(value)),
|
||||
};
|
||||
let value=ret.value.clone();
|
||||
thread::spawn(move || {
|
||||
loop {
|
||||
match receiver.recv() {
|
||||
Ok(task) => {
|
||||
let v=f(task);//make sure function is evaluated before lock is acquired
|
||||
*value.lock()=v;
|
||||
}
|
||||
Err(_) => {
|
||||
println!("Worker stopping.",);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn send(&self,task:Task)->Result<(), mpsc::SendError<Task>>{
|
||||
self.sender.send(task)
|
||||
}
|
||||
|
||||
pub fn grab_clone(&self)->Value{
|
||||
self.value.lock().clone()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
QN = WorkerDescription{
|
||||
input:Queued,
|
||||
output:None(Single),
|
||||
}
|
||||
*/
|
||||
//None Output Worker does all its work internally from the perspective of the work submitter
|
||||
pub struct QNWorker<'a,Task:Send>{
|
||||
sender: mpsc::Sender<Task>,
|
||||
handle:thread::ScopedJoinHandle<'a,()>,
|
||||
}
|
||||
|
||||
impl<'a,Task:Send+'a> QNWorker<'a,Task>{
|
||||
pub fn new<F:FnMut(Task)+Send+'a>(scope:&'a thread::Scope<'a,'_>,mut f:F)->QNWorker<'a,Task>{
|
||||
let (sender,receiver)=mpsc::channel::<Task>();
|
||||
let handle=scope.spawn(move ||{
|
||||
loop {
|
||||
match receiver.recv() {
|
||||
Ok(task)=>f(task),
|
||||
Err(_)=>{
|
||||
println!("Worker stopping.",);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Self{
|
||||
sender,
|
||||
handle,
|
||||
}
|
||||
}
|
||||
pub fn send(&self,task:Task)->Result<(),mpsc::SendError<Task>>{
|
||||
self.sender.send(task)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
IN = WorkerDescription{
|
||||
input:Immediate,
|
||||
output:None(Single),
|
||||
}
|
||||
*/
|
||||
//Inputs are dropped if the worker is busy
|
||||
pub struct INWorker<'a,Task:Send>{
|
||||
sender: mpsc::SyncSender<Task>,
|
||||
handle:thread::ScopedJoinHandle<'a,()>,
|
||||
}
|
||||
|
||||
impl<'a,Task:Send+'a> INWorker<'a,Task>{
|
||||
pub fn new<F:FnMut(Task)+Send+'a>(scope:&'a thread::Scope<'a,'_>,mut f:F)->INWorker<'a,Task>{
|
||||
let (sender,receiver)=mpsc::sync_channel::<Task>(1);
|
||||
let handle=scope.spawn(move ||{
|
||||
loop {
|
||||
match receiver.recv() {
|
||||
Ok(task)=>f(task),
|
||||
Err(_)=>{
|
||||
println!("Worker stopping.",);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Self{
|
||||
sender,
|
||||
handle,
|
||||
}
|
||||
}
|
||||
//blocking!
|
||||
pub fn blocking_send(&self,task:Task)->Result<(), mpsc::SendError<Task>>{
|
||||
self.sender.send(task)
|
||||
}
|
||||
pub fn send(&self,task:Task)->Result<(), mpsc::TrySendError<Task>>{
|
||||
self.sender.try_send(task)
|
||||
}
|
||||
}
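//hypothetical usage sketch (not part of the crate): the scoped workers must be created
//inside std::thread::scope so their closures may borrow from the enclosing stack frame.
/*
std::thread::scope(|s|{
	let worker=QNWorker::new(s,|task:u32|println!("task {task}"));
	worker.send(7).unwrap();
});//scope joins the worker thread here once the sender is dropped
*/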
|
||||
|
||||
#[cfg(test)]
|
||||
mod test{
|
||||
use super::{thread,QRWorker};
|
||||
use crate::physics;
|
||||
use strafesnet_common::{integer,instruction};
|
||||
#[test]//How to run this test with printing: cargo test --release -- --nocapture
|
||||
fn test_worker() {
|
||||
// Create the worker thread
|
||||
let test_body=physics::Body::new(integer::vec3::ONE,integer::vec3::ONE,integer::vec3::ONE,integer::Time::ZERO);
|
||||
let worker=QRWorker::new(physics::Body::ZERO,
|
||||
|_|physics::Body::new(integer::vec3::ONE,integer::vec3::ONE,integer::vec3::ONE,integer::Time::ZERO)
|
||||
);
|
||||
|
||||
// Send tasks to the worker
|
||||
for _ in 0..5 {
|
||||
let task = instruction::TimedInstruction{
|
||||
time:integer::Time::ZERO,
|
||||
instruction:strafesnet_common::physics::Instruction::Idle,
|
||||
};
|
||||
worker.send(task).unwrap();
|
||||
}
|
||||
|
||||
// Optional: Signal the worker to stop (in a real-world scenario)
|
||||
// sender.send("STOP".to_string()).unwrap();
|
||||
|
||||
// Sleep to allow the worker thread to finish processing
|
||||
thread::sleep(std::time::Duration::from_millis(10));
|
||||
|
||||
// Send a new task
|
||||
let task = instruction::TimedInstruction{
|
||||
time:integer::Time::ZERO,
|
||||
instruction:strafesnet_common::physics::Instruction::Idle,
|
||||
};
|
||||
worker.send(task).unwrap();
|
||||
|
||||
//assert_eq!(test_body,worker.grab_clone());
|
||||
|
||||
// wait long enough to see print from final task
|
||||
thread::sleep(std::time::Duration::from_millis(10));
|
||||
}
|
||||
}
|
@@ -1 +0,0 @@
mangohud ../target/release/strafe-client bhop_maps/5692113331.snfm
@@ -1 +0,0 @@
/run/media/quat/Files/Documents/map-files/verify-scripts/maps/bhop_snfm
@@ -1 +0,0 @@
cargo build --release --target x86_64-pc-windows-gnu --all-features
@@ -1 +0,0 @@
mangohud ../target/release/strafe-client bhop_maps/5692124338.snfm
@@ -1,4 +0,0 @@
mkdir -p ../target/demo
mv ../target/x86_64-pc-windows-gnu/release/strafe-client.exe ../target/demo/strafe-client.exe
rm ../target/demo.7z
7z a -t7z -mx=9 -mfb=273 -ms -md=31 -myx=9 -mtm=- -mmt -mmtf -md=1536m -mmf=bt3 -mmc=10000 -mpb=0 -mlc=0 ../target/demo.7z ../target/demo
@@ -1,4 +0,0 @@
[camera]
sensitivity_x=98384
fov_y=1.0
#fov_x_from_y_ratio=1.33333333333333333333333333333333
@@ -1 +0,0 @@
/run/media/quat/Files/Documents/map-files/verify-scripts/maps/surf_snfm
@@ -1 +0,0 @@
mangohud ../target/release/strafe-client bhop_maps/5692152916.snfm
@@ -1 +0,0 @@
mangohud ../target/release/strafe-client surf_maps/5692145408.snfm