Compare commits


21 Commits

SHA1 Message Date
f0479181f6 do not step physics on mouse input, only update pos (overwriting previous pos) 2023-09-21 16:39:51 -07:00
971aa9e287 fix angles 2023-09-21 16:39:51 -07:00
6a79c4ec24 accumulate deltas 2023-09-21 16:39:51 -07:00
9025bea5ef implement jump() + remove jump_trying + prevent air jumping 2023-09-21 16:39:51 -07:00
6ed71073f6 wip 2 2023-09-21 16:39:51 -07:00
91bfa70f05 wip 2023-09-21 16:38:47 -07:00
1c9bc347f6 clear prev map 2023-09-21 16:01:02 -07:00
c9afa2d059 only load Block shaped parts 2023-09-21 15:45:02 -07:00
1a66dfbaf7 v0.5.0 model color + drag & drop to load maps 2023-09-21 15:45:02 -07:00
847209aac4 runtime load physics 2023-09-21 15:45:02 -07:00
42ba757ec0 plumb color everywhere 2023-09-21 13:08:13 -07:00
1cee3b52ac switch entity_transforms to storage buffers to remove hardcoded part cap 2023-09-21 11:57:17 -07:00
e27ce3b507 dynamic image size 2023-09-21 11:56:03 -07:00
bc8f2bd566 finalize physics models 2023-09-21 00:03:14 -07:00
eed932212d comment code that will be deleted soon and cause merge conflicts for no reason 2023-09-20 23:45:55 -07:00
73edb9ff95 drag & drop to load roblox map 2023-09-20 23:45:55 -07:00
ae0c9e73ee make handy unit cube 2023-09-20 23:44:12 -07:00
953d424a57 load_roblox module 2023-09-20 23:44:12 -07:00
ca919b92fd add roblox deps 2023-09-20 23:44:12 -07:00
1de3501e89 no default transform 2023-09-20 22:29:46 -07:00
0135b17917 make proper model data and stop passing device into add_obj 2023-09-20 22:29:33 -07:00
8 changed files with 224 additions and 146 deletions

Cargo.lock (generated, 2 lines changed)

@ -1645,7 +1645,7 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strafe-client"
version = "0.4.0"
version = "0.5.0"
dependencies = [
"async-executor",
"bytemuck",

Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "strafe-client"
version = "0.4.0"
version = "0.5.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

Binary file not shown.

src/body.rs

@ -254,8 +254,7 @@ pub struct PhysicsState {
pub hitbox_halfsize: glam::Vec3,
pub contacts: std::collections::HashSet::<RelativeCollision>,
//pub intersections: Vec<ModelId>,
//temp
pub models_cringe_clone: Vec<Model>,
pub models: Vec<ModelPhysics>,
//camera must exist in state because wormholes modify the camera, also camera punch
pub camera: Camera,
pub mouse_interpolation: MouseInterpolationState,
@ -382,13 +381,13 @@ impl Aabb {
type TreyMeshFace = AabbFace;
type TreyMesh = Aabb;
pub struct Model {
pub struct ModelPhysics {
//A model is a thing that has a hitbox. can be represented by a list of TreyMesh-es
//in this iteration, all it needs is extents.
transform: glam::Mat4,
}
impl Model {
impl ModelPhysics {
pub fn new(transform:glam::Mat4) -> Self {
Self{transform}
}
@ -432,10 +431,10 @@ pub struct RelativeCollision {
}
impl RelativeCollision {
pub fn mesh(&self,models:&Vec<Model>) -> TreyMesh {
pub fn mesh(&self,models:&Vec<ModelPhysics>) -> TreyMesh {
return models.get(self.model as usize).unwrap().face_mesh(self.face)
}
pub fn normal(&self,models:&Vec<Model>) -> glam::Vec3 {
pub fn normal(&self,models:&Vec<ModelPhysics>) -> glam::Vec3 {
return models.get(self.model as usize).unwrap().face_normal(self.face)
}
}
@ -494,7 +493,7 @@ impl PhysicsState {
fn contact_constrain_velocity(&self,velocity:&mut glam::Vec3){
for contact in self.contacts.iter() {
let n=contact.normal(&self.models_cringe_clone);
let n=contact.normal(&self.models);
let d=velocity.dot(n);
if d<0f32{
(*velocity)-=d/n.length_squared()*n;
@ -503,7 +502,7 @@ impl PhysicsState {
}
fn contact_constrain_acceleration(&self,acceleration:&mut glam::Vec3){
for contact in self.contacts.iter() {
let n=contact.normal(&self.models_cringe_clone);
let n=contact.normal(&self.models);
let d=acceleration.dot(n);
if d<0f32{
(*acceleration)-=d/n.length_squared()*n;
@ -603,7 +602,7 @@ impl PhysicsState {
let mut best_time=time_limit;
let mut exit_face:Option<TreyMeshFace>=None;
let mesh0=self.mesh();
let mesh1=self.models_cringe_clone.get(collision_data.model as usize).unwrap().mesh();
let mesh1=self.models.get(collision_data.model as usize).unwrap().mesh();
let (v,a)=(-self.body.velocity,self.body.acceleration);
//collect x
match collision_data.face {
@ -754,7 +753,7 @@ impl PhysicsState {
let mut best_time=time_limit;
let mut best_face:Option<TreyMeshFace>=None;
let mesh0=self.mesh();
let mesh1=self.models_cringe_clone.get(model_id as usize).unwrap().mesh();
let mesh1=self.models.get(model_id as usize).unwrap().mesh();
let (p,v,a)=(self.body.position,self.body.velocity,self.body.acceleration);
//collect x
for t in zeroes2(mesh0.max.x-mesh1.min.x,v.x,0.5*a.x) {
@ -879,7 +878,7 @@ impl crate::instruction::InstructionEmitter<PhysicsInstruction> for PhysicsState
collector.collect(self.predict_collision_end(self.time,time_limit,collision_data));
}
//check for collision start instructions (against every part in the game with no optimization!!)
for i in 0..self.models_cringe_clone.len() {
for i in 0..self.models.len() {
collector.collect(self.predict_collision_start(self.time,time_limit,i as u32));
}
if self.grounded {
@ -1018,4 +1017,4 @@ impl crate::instruction::InstructionConsumer<PhysicsInstruction> for PhysicsStat
},
}
}
}
}
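The collision prediction above repeatedly calls `zeroes2(c, v, 0.5*a)` to find the times at which an axis-aligned gap such as `mesh0.max.x-mesh1.min.x` closes under constant acceleration. `zeroes2` itself is not part of this diff; purely as an illustration, and assuming its arguments are the constant, linear and quadratic coefficients of `a2*t^2 + a1*t + a0 = 0`, a helper of that shape could look like:

```rust
// Hypothetical sketch only: a quadratic root helper matching the calls above.
// Assumes zeroes2(a0, a1, a2) returns the real roots of a2*t^2 + a1*t + a0 = 0
// in ascending order; the real implementation lives outside this diff.
fn zeroes2(a0: f32, a1: f32, a2: f32) -> Vec<f32> {
    if a2 == 0.0 {
        // Degenerates to the linear case a1*t + a0 = 0 (e.g. zero acceleration).
        return if a1 == 0.0 { vec![] } else { vec![-a0 / a1] };
    }
    let discriminant = a1 * a1 - 4.0 * a2 * a0;
    if discriminant < 0.0 {
        return vec![]; // the gap never reaches zero
    }
    let sqrt_d = discriminant.sqrt();
    let mut roots = vec![(-a1 - sqrt_d) / (2.0 * a2), (-a1 + sqrt_d) / (2.0 * a2)];
    roots.sort_by(|x, y| x.partial_cmp(y).unwrap());
    roots
}
```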

src/framework.rs

@ -51,7 +51,7 @@ pub trait Example: 'static + Sized {
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn update(&mut self, device: &wgpu::Device, event: WindowEvent);
fn device_event(&mut self, event: DeviceEvent);
fn render(
&mut self,
@ -344,7 +344,7 @@ fn start<E: Example>(
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
example.update(&device,event);
}
},
event::Event::DeviceEvent {

src/load_roblox.rs

@ -10,8 +10,7 @@ fn class_is_a(class: &str, superclass: &str) -> bool {
}
return false
}
pub fn get_objects(buf_thing: std::io::BufReader<&[u8]>, superclass: &str) -> Result<std::vec::Vec<rbx_dom_weak::Instance>, Box<dyn std::error::Error>> {
pub fn get_objects<R: std::io::Read>(buf_thing: R, superclass: &str) -> Result<std::vec::Vec<rbx_dom_weak::Instance>, Box<dyn std::error::Error>> {
// Using buffered I/O is recommended with rbx_binary
let dom = rbx_binary::from_reader(buf_thing)?;
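Making `get_objects` generic over `R: std::io::Read` is what lets the `DroppedFile` handler in main.rs pass a `BufReader<File>` instead of an in-memory byte slice. A minimal usage sketch mirroring the calls in this diff (the helper name `load_parts_from_file` is illustrative):

```rust
use std::io::BufReader;

// Illustrative helper; mirrors how main.rs below feeds a dropped file to get_objects.
fn load_parts_from_file(path: &std::path::Path) -> Result<(), Box<dyn std::error::Error>> {
    let file = std::fs::File::open(path)?;
    // Buffered I/O is recommended with rbx_binary, hence the BufReader.
    let objects = strafe_client::load_roblox::get_objects(BufReader::new(file), "BasePart")?;
    println!("loaded {} BaseParts", objects.len());
    Ok(())
}
```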

src/main.rs

@ -11,6 +11,7 @@ struct Vertex {
pos: [f32; 3],
texture: [f32; 2],
normal: [f32; 3],
color: [f32; 4],
}
struct Entity {
@ -18,19 +19,38 @@ struct Entity {
index_buf: wgpu::Buffer,
}
struct ModelInstance {
transform: glam::Mat4,
color: glam::Vec4,
}
struct ModelData {
transforms: Vec<glam::Mat4>,
instances: Vec<ModelInstance>,
vertices: Vec<Vertex>,
entities: Vec<Vec<u16>>,
}
impl ModelData {
const COLOR_FLOATS_WHITE: [f32;4] = [1.0,1.0,1.0,1.0];
const COLOR_VEC4_WHITE: glam::Vec4 = glam::vec4(1.0,1.0,1.0,1.0);
}
struct ModelGraphics {
transforms: Vec<glam::Mat4>,
instances: Vec<ModelInstance>,
vertex_buf: wgpu::Buffer,
entities: Vec<Entity>,
bind_group: wgpu::BindGroup,
model_buf: wgpu::Buffer,
}
pub struct GraphicsSamplers{
repeat: wgpu::Sampler,
}
pub struct GraphicsBindGroupLayouts{
model: wgpu::BindGroupLayout,
}
pub struct GraphicsBindGroups {
camera: wgpu::BindGroup,
skybox_texture: wgpu::BindGroup,
@ -47,6 +67,9 @@ pub struct GraphicsData {
physics: strafe_client::body::PhysicsState,
pipelines: GraphicsPipelines,
bind_groups: GraphicsBindGroups,
bind_group_layouts: GraphicsBindGroupLayouts,
samplers: GraphicsSamplers,
temp_squid_texture_view: wgpu::TextureView,
camera_buf: wgpu::Buffer,
models: Vec<ModelGraphics>,
depth_view: wgpu::TextureView,
@ -79,8 +102,8 @@ impl GraphicsData {
depth_texture.create_view(&wgpu::TextureViewDescriptor::default())
}
fn generate_modeldatas_roblox(&self,input:std::io::BufReader<&[u8]>) -> Vec<ModelData>{
let mut modeldata=generate_modeldatas(self.handy_unit_cube.clone())[0];
fn generate_modeldatas_roblox<R: std::io::Read>(&self,input:R) -> Vec<ModelData>{
let mut modeldatas=generate_modeldatas(self.handy_unit_cube.clone(),ModelData::COLOR_FLOATS_WHITE);
match strafe_client::load_roblox::get_objects(input, "BasePart") {
Ok(objects)=>{
for object in objects.iter() {
@ -88,59 +111,122 @@ impl GraphicsData {
Some(rbx_dom_weak::types::Variant::CFrame(cf)),
Some(rbx_dom_weak::types::Variant::Vector3(size)),
Some(rbx_dom_weak::types::Variant::Float32(transparency)),
Some(rbx_dom_weak::types::Variant::Color3(color3)),
Some(rbx_dom_weak::types::Variant::Color3uint8(color3)),
Some(rbx_dom_weak::types::Variant::Enum(shape)),
) = (
object.properties.get("CFrame"),
object.properties.get("Size"),
object.properties.get("Transparency"),
object.properties.get("Color"),
object.properties.get("Shape"),//this will also skip unions
)
{
if *transparency==1.0 {
if *transparency==1.0||shape.to_u32()!=1 {
continue;
}
modeldata.transforms.push(
glam::Mat4::from_translation(
glam::Vec3::new(cf.position.x,cf.position.y,cf.position.z)
)
* glam::Mat4::from_mat3(
glam::Mat3::from_cols(
glam::Vec3::new(cf.orientation.x.x,cf.orientation.y.x,cf.orientation.z.x),
glam::Vec3::new(cf.orientation.x.y,cf.orientation.y.y,cf.orientation.z.y),
glam::Vec3::new(cf.orientation.x.z,cf.orientation.y.z,cf.orientation.z.z),
modeldatas[0].instances.push(ModelInstance {
transform:glam::Mat4::from_translation(
glam::Vec3::new(cf.position.x,cf.position.y,cf.position.z)
)
* glam::Mat4::from_mat3(
glam::Mat3::from_cols(
glam::Vec3::new(cf.orientation.x.x,cf.orientation.y.x,cf.orientation.z.x),
glam::Vec3::new(cf.orientation.x.y,cf.orientation.y.y,cf.orientation.z.y),
glam::Vec3::new(cf.orientation.x.z,cf.orientation.y.z,cf.orientation.z.z),
),
)
* glam::Mat4::from_scale(
glam::Vec3::new(size.x,size.y,size.z)/2.0
),
)
* glam::Mat4::from_scale(
glam::Vec3::new(size.x,size.y,size.z)/2.0
)
)
color: glam::vec4(color3.r as f32/255f32, color3.g as f32/255f32, color3.b as f32/255f32, 1.0-*transparency),
})
}
}
},
Err(e) => println!("lmao err {:?}", e),
}
vec![modeldata]
modeldatas
}
fn generate_model_graphics(&mut self,modeldatas:Vec<ModelData>){
//
fn generate_model_physics(&mut self,modeldatas:&Vec<ModelData>){
self.physics.models.append(&mut modeldatas.iter().map(|m|
//make aabb and run vertices to get realistic bounds
m.instances.iter().map(|t|strafe_client::body::ModelPhysics::new(t.transform))
).flatten().collect());
}
fn generate_model_graphics(&mut self,device:&wgpu::Device,mut modeldatas:Vec<ModelData>){
//drain the modeldata vec so entities can be /moved/ to models.entities
self.models.reserve(modeldatas.len());
for (i,modeldata) in modeldatas.drain(..).enumerate() {
let model_uniforms = get_instances_buffer_data(&modeldata.instances);
let model_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("ModelGraphics{}",i).as_str()),
contents: bytemuck::cast_slice(&model_uniforms),
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
});
let model_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &self.bind_group_layouts.model,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: model_buf.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&self.temp_squid_texture_view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Sampler(&self.samplers.repeat),
},
],
label: Some(format!("ModelGraphics{}",i).as_str()),
});
let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex"),
contents: bytemuck::cast_slice(&modeldata.vertices),
usage: wgpu::BufferUsages::VERTEX,
});
//all of these are being moved here
self.models.push(ModelGraphics{
instances:modeldata.instances,
vertex_buf,
entities: modeldata.entities.iter().map(|indices|{
let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index"),
contents: bytemuck::cast_slice(&indices),
usage: wgpu::BufferUsages::INDEX,
});
Entity {
index_buf,
index_count: indices.len() as u32,
}
}).collect(),
bind_group: model_bind_group,
model_buf,
})
}
}
}
fn get_transform_uniform_data(transforms:&Vec<glam::Mat4>) -> Vec<f32> {
let mut raw = Vec::with_capacity(4*4*transforms.len());
for (i,t) in transforms.iter().enumerate(){
let mut v = raw.split_off(4*4*i);
raw.extend_from_slice(&AsRef::<[f32; 4*4]>::as_ref(t)[..]);
fn get_instances_buffer_data(instances:&Vec<ModelInstance>) -> Vec<f32> {
const SIZE: usize=4*4+4;//let size=std::mem::size_of::<ModelInstance>();
let mut raw = Vec::with_capacity(SIZE*instances.len());
for (i,mi) in instances.iter().enumerate(){
let mut v = raw.split_off(SIZE*i);
raw.extend_from_slice(&AsRef::<[f32; 4*4]>::as_ref(&mi.transform)[..]);
raw.extend_from_slice(AsRef::<[f32; 4]>::as_ref(&mi.color));
raw.append(&mut v);
}
raw
}
fn generate_modeldatas<'a>(data:obj::ObjData) -> &'a mut Vec<ModelData>{
fn generate_modeldatas(data:obj::ObjData,color:[f32;4]) -> Vec<ModelData>{
let mut modeldatas=Vec::new();
let mut vertices = Vec::new();
let mut vertex_index = std::collections::HashMap::<obj::IndexTuple,u16>::new();
for object in data.objects {
vertices.clear();
vertex_index.clear();
let mut entities = Vec::new();
for group in object.groups {
let mut indices = Vec::new();
@ -156,6 +242,7 @@ fn generate_modeldatas<'a>(data:obj::ObjData) -> &'a mut Vec<ModelData>{
pos: data.position[vert.0],
texture: data.texture[vert.1.unwrap()],
normal: data.normal[vert.2.unwrap()],
color,
});
vertex_index.insert(vert,i);
indices.push(i);
@ -166,12 +253,12 @@ fn generate_modeldatas<'a>(data:obj::ObjData) -> &'a mut Vec<ModelData>{
entities.push(indices);
}
modeldatas.push(ModelData {
transforms: vec![],
vertices,
instances: Vec::new(),
vertices:vertices.clone(),
entities,
});
}
&mut modeldatas
modeldatas
}
@ -195,7 +282,12 @@ impl strafe_client::framework::Example for GraphicsData {
| wgpu::Features::TEXTURE_COMPRESSION_ETC2
| wgpu::Features::TEXTURE_COMPRESSION_BC
}
fn required_features() -> wgpu::Features {
wgpu::Features::STORAGE_RESOURCE_BINDING_ARRAY
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::default() //framework.rs was using goofy limits that caused me a multi-day headache
}
fn init(
config: &wgpu::SurfaceConfiguration,
_adapter: &wgpu::Adapter,
@ -277,21 +369,42 @@ impl strafe_client::framework::Example for GraphicsData {
material_libs: Vec::new(),
};
let mut modeldatas = Vec::<ModelData>::new();
modeldatas.append(generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/teslacyberv3.0.obj")[..]).unwrap()));
modeldatas.append(generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/suzanne.obj")[..]).unwrap()));
modeldatas.append(generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/teapot.obj")[..]).unwrap()));
modeldatas.append(generate_modeldatas(unit_cube.clone()));
modeldatas.append(&mut generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/teslacyberv3.0.obj")[..]).unwrap(),ModelData::COLOR_FLOATS_WHITE));
modeldatas.append(&mut generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/suzanne.obj")[..]).unwrap(),ModelData::COLOR_FLOATS_WHITE));
modeldatas.append(&mut generate_modeldatas(obj::ObjData::load_buf(&include_bytes!("../models/teapot.obj")[..]).unwrap(),ModelData::COLOR_FLOATS_WHITE));
modeldatas.append(&mut generate_modeldatas(unit_cube.clone(),ModelData::COLOR_FLOATS_WHITE));
println!("models.len = {:?}", modeldatas.len());
modeldatas[0].transforms[0]=glam::Mat4::from_translation(glam::vec3(10.,0.,-10.));
modeldatas[0].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(10.,0.,-10.)),
color:ModelData::COLOR_VEC4_WHITE,
});
//quad monkeys
modeldatas[1].transforms[0]=glam::Mat4::from_translation(glam::vec3(10.,5.,10.));
modeldatas[1].transforms.push(glam::Mat4::from_translation(glam::vec3(20.,5.,10.)));
modeldatas[1].transforms.push(glam::Mat4::from_translation(glam::vec3(10.,5.,20.)));
modeldatas[1].transforms.push(glam::Mat4::from_translation(glam::vec3(20.,5.,20.)));
modeldatas[1].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(10.,5.,10.)),
color:ModelData::COLOR_VEC4_WHITE,
});
modeldatas[1].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(20.,5.,10.)),
color:glam::vec4(1.0,0.0,0.0,1.0),
});
modeldatas[1].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(10.,5.,20.)),
color:glam::vec4(0.0,1.0,0.0,1.0),
});
modeldatas[1].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(20.,5.,20.)),
color:glam::vec4(0.0,0.0,1.0,1.0),
});
//teapot
modeldatas[2].transforms[0]=glam::Mat4::from_translation(glam::vec3(-10.,5.,10.));
modeldatas[2].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(-10.,5.,10.)),
color:ModelData::COLOR_VEC4_WHITE,
});
//ground
modeldatas[3].transforms[0]=glam::Mat4::from_translation(glam::vec3(0.,0.,0.))*glam::Mat4::from_scale(glam::vec3(160.0, 1.0, 160.0));
modeldatas[3].instances.push(ModelInstance{
transform:glam::Mat4::from_translation(glam::vec3(0.,0.,0.))*glam::Mat4::from_scale(glam::vec3(160.0, 1.0, 160.0)),
color:ModelData::COLOR_VEC4_WHITE,
});
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
@ -336,7 +449,7 @@ impl strafe_client::framework::Example for GraphicsData {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
@ -401,7 +514,7 @@ impl strafe_client::framework::Example for GraphicsData {
grounded: false,
walkspeed: 18.0,
contacts: std::collections::HashSet::new(),
models_cringe_clone: modeldatas.iter().map(|m|m.transforms.iter().map(|t|strafe_client::body::Model::new(*t))).flatten().collect(),
models: Vec::new(),
walk: strafe_client::body::WalkState::new(),
hitbox_halfsize: glam::vec3(1.0,2.5,1.0),
camera: strafe_client::body::Camera::from_offset(glam::vec3(0.0,4.5-2.5,0.0),(config.width as f32)/(config.height as f32)),
@ -487,9 +600,13 @@ impl strafe_client::framework::Example for GraphicsData {
//squid
let squid_texture_view={
let bytes = &include_bytes!("../images/squid.dds")[..];
let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap();
let size = wgpu::Extent3d {
width: 1076,
height: 1076,
width: image.get_width(),
height: image.get_height(),
depth_or_array_layers: 1,
};
@ -499,10 +616,6 @@ impl strafe_client::framework::Example for GraphicsData {
};
let max_mips = layer_size.max_mips(wgpu::TextureDimension::D2);
let bytes = &include_bytes!("../images/squid.dds")[..];
let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap();
let texture = device.create_texture_with_data(
queue,
&wgpu::TextureDescriptor {
@ -525,58 +638,6 @@ impl strafe_client::framework::Example for GraphicsData {
})
};
//drain the modeldata vec so entities can be /moved/ to models.entities
let mut models = Vec::<ModelGraphics>::with_capacity(modeldatas.len());
for (i,modeldata) in modeldatas.drain(..).enumerate() {
let model_uniforms = get_transform_uniform_data(&modeldata.transforms);
let model_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("ModelGraphics{}",i).as_str()),
contents: bytemuck::cast_slice(&model_uniforms),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let model_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &model_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: model_buf.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&squid_texture_view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Sampler(&repeat_sampler),
},
],
label: Some(format!("ModelGraphics{}",i).as_str()),
});
let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex"),
contents: bytemuck::cast_slice(&modeldata.vertices),
usage: wgpu::BufferUsages::VERTEX,
});
//all of these are being moved here
models.push(ModelGraphics{
transforms:modeldata.transforms,
vertex_buf,
entities: modeldata.entities.iter().map(|indices|{
let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index"),
contents: bytemuck::cast_slice(&indices),
usage: wgpu::BufferUsages::INDEX,
});
Entity {
index_buf,
index_count: indices.len() as u32,
}
}).collect(),
bind_group: model_bind_group,
model_buf,
})
}
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[
@ -624,7 +685,7 @@ impl strafe_client::framework::Example for GraphicsData {
buffers: &[wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3],
attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x3, 3 => Float32x4],
}],
},
fragment: Some(wgpu::FragmentState {
@ -680,7 +741,7 @@ impl strafe_client::framework::Example for GraphicsData {
let depth_view = Self::create_depth_texture(config, device);
GraphicsData {
let mut graphics=GraphicsData {
handy_unit_cube:unit_cube,
start_time: Instant::now(),
screen_size: (config.width,config.height),
@ -694,23 +755,37 @@ impl strafe_client::framework::Example for GraphicsData {
skybox_texture:skybox_texture_bind_group,
},
camera_buf,
models,
models: Vec::new(),
depth_view,
staging_belt: wgpu::util::StagingBelt::new(0x100),
}
bind_group_layouts: GraphicsBindGroupLayouts { model: model_bind_group_layout },
samplers: GraphicsSamplers { repeat: repeat_sampler },
temp_squid_texture_view: squid_texture_view,
};
graphics.generate_model_physics(&modeldatas);
graphics.generate_model_graphics(&device,modeldatas);
return graphics;
}
#[allow(clippy::single_match)]
fn update(&mut self, event: winit::event::WindowEvent) {
//nothing atm
fn update(&mut self, device: &wgpu::Device, event: winit::event::WindowEvent) {
match event {
winit::event::WindowEvent::DroppedFile(path) => {
println!("opening file: {:?}", &path);
//oh boy! let's load the map!
let file=std::fs::File::open(path);
let input = std::io::BufReader::new(file);
let modeldatas=self.generate_modeldatas_roblox(input);
self.generate_model_graphics(modeldatas);
//also physics
if let Ok(file)=std::fs::File::open(path){
let input = std::io::BufReader::new(file);
let modeldatas=self.generate_modeldatas_roblox(input);
//if generate_modeldatas succeeds, clear the previous ones
self.models.clear();
self.physics.models.clear();
self.generate_model_physics(&modeldatas);
self.generate_model_graphics(device,modeldatas);
}else{
println!("Could not open file");
}
},
_=>(),
}
@ -814,7 +889,7 @@ impl strafe_client::framework::Example for GraphicsData {
.copy_from_slice(bytemuck::cast_slice(&camera_uniforms));
//This code only needs to run when the uniforms change
for model in self.models.iter() {
let model_uniforms = get_transform_uniform_data(&model.transforms);
let model_uniforms = get_instances_buffer_data(&model.instances);
self.staging_belt
.write_buffer(
&mut encoder,
@ -863,7 +938,7 @@ impl strafe_client::framework::Example for GraphicsData {
for entity in model.entities.iter() {
rpass.set_index_buffer(entity.index_buf.slice(..), wgpu::IndexFormat::Uint16);
rpass.draw_indexed(0..entity.index_count, 0, 0..model.transforms.len() as u32);
rpass.draw_indexed(0..entity.index_count, 0, 0..model.instances.len() as u32);
}
}

src/shader.wgsl

@ -41,15 +41,17 @@ fn vs_sky(@builtin(vertex_index) vertex_index: u32) -> SkyOutput {
return result;
}
const MAX_ENTITY_INSTANCES=1024;
struct ModelInstance{
transform:mat4x4<f32>,
//texture_transform:mat3x3<f32>,
color:vec4<f32>,
}
//my fancy idea is to create a megatexture for each model that includes all the textures each intance will need
//the texture transform then maps the texture coordinates to the location of the specific texture
//group 1 is the model
@group(1)
@binding(0)
var<uniform> entity_transforms: array<mat4x4<f32>,MAX_ENTITY_INSTANCES>;
//var<uniform> entity_texture_transforms: array<mat3x3<f32>,MAX_ENTITY_INSTANCES>;
//my fancy idea is to create a megatexture for each model that includes all the textures each intance will need
//the texture transform then maps the texture coordinates to the location of the specific texture
//how to do no texture?
var<storage> model_instances: array<ModelInstance>;
@group(1)
@binding(1)
var model_texture: texture_2d<f32>;
@ -62,6 +64,7 @@ struct EntityOutputTexture {
@location(1) texture: vec2<f32>,
@location(2) normal: vec3<f32>,
@location(3) view: vec3<f32>,
@location(4) color: vec4<f32>,
};
@vertex
fn vs_entity_texture(
@ -69,11 +72,13 @@ fn vs_entity_texture(
@location(0) pos: vec3<f32>,
@location(1) texture: vec2<f32>,
@location(2) normal: vec3<f32>,
@location(3) color: vec4<f32>,
) -> EntityOutputTexture {
var position: vec4<f32> = entity_transforms[instance] * vec4<f32>(pos, 1.0);
var position: vec4<f32> = model_instances[instance].transform * vec4<f32>(pos, 1.0);
var result: EntityOutputTexture;
result.normal = (entity_transforms[instance] * vec4<f32>(normal, 0.0)).xyz;
result.texture=texture;//(entity_texture_transforms[instance] * vec3<f32>(texture, 1.0)).xy;
result.normal = (model_instances[instance].transform * vec4<f32>(normal, 0.0)).xyz;
result.texture=texture;//(model_instances[instance].texture_transform * vec3<f32>(texture, 1.0)).xy;
result.color=model_instances[instance].color * color;
result.view = position.xyz - camera.cam_pos.xyz;
result.position = camera.proj * camera.view * position;
return result;
@ -99,7 +104,7 @@ fn fs_entity_texture(vertex: EntityOutputTexture) -> @location(0) vec4<f32> {
let d = dot(normal, incident);
let reflected = incident - 2.0 * d * normal;
let fragment_color = textureSample(model_texture, model_sampler, vertex.texture).rgb;
let fragment_color = textureSample(model_texture, model_sampler, vertex.texture)*vertex.color;
let reflected_color = textureSample(cube_texture, cube_sampler, reflected).rgb;
return vec4<f32>(mix(vec3<f32>(0.1) + 0.5 * reflected_color,fragment_color,1.0-pow(1.0-abs(d),2.0)), 1.0);
return mix(vec4<f32>(vec3<f32>(0.1) + 0.5 * reflected_color,1.0),fragment_color,1.0-pow(1.0-abs(d),2.0));
}
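One detail worth noting about the storage-buffer switch: the flat `Vec<f32>` built by `get_instances_buffer_data` (16 transform floats followed by 4 color floats per instance) has to match the layout of the `ModelInstance` struct above, which in the storage address space is 64 bytes of `mat4x4<f32>` plus 16 bytes of `vec4<f32>`, an 80-byte array stride. As a sketch only (and assuming bytemuck's derive feature, which is not shown in this diff), an equivalent `#[repr(C)]` struct could be cast directly instead of hand-packing floats:

```rust
// Layout sketch only; the diff packs a raw Vec<f32> by hand instead.
// 16 f32 (column-major mat4x4<f32>) + 4 f32 (vec4<f32>) = 80 bytes per instance,
// matching the array<ModelInstance> stride in the shader above.
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
struct ModelInstanceRaw {
    transform: [f32; 16],
    color: [f32; 4],
}

fn instances_as_bytes(instances: &[ModelInstanceRaw]) -> &[u8] {
    bytemuck::cast_slice(instances)
}
```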