From d3e4918d3e1a8a1d20012d9399360e3cbe5444c7 Mon Sep 17 00:00:00 2001
From: Quaternions
Date: Mon, 2 Oct 2023 01:57:15 -0700
Subject: [PATCH] into_iter is probably better than drain

---
 src/main.rs       | 6 +++---
 src/primitives.rs | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index 6c78fb6..d8eeb9f 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -161,7 +161,7 @@ impl GraphicsData {
 		//the models received here are supposed to be tightly packed, i.e. no code needs to check if two models are using the same groups.
 		let indexed_models_len=indexed_models.models.len();
 		let mut unique_texture_models=Vec::with_capacity(indexed_models_len);
-		for mut model in indexed_models.models.drain(..){
+		for mut model in indexed_models.models.into_iter(){
 			//convert ModelInstance into ModelGraphicsInstance
 			let instances:Vec=model.instances.iter().map(|instance|{
 				ModelGraphicsInstance{
@@ -200,7 +200,7 @@ impl GraphicsData {
 		}
 		//de-index models
 		let mut models=Vec::with_capacity(unique_texture_models.len());
-		for model in unique_texture_models.drain(..){
+		for model in unique_texture_models.into_iter(){
 			let mut vertices = Vec::new();
 			let mut index_from_vertex = std::collections::HashMap::new();//::
 			let mut entities = Vec::new();
@@ -243,7 +243,7 @@ impl GraphicsData {
 		let uniform_buffer_binding_size=::required_limits().max_uniform_buffer_binding_size as usize;
 		let chunk_size=uniform_buffer_binding_size/MODEL_BUFFER_SIZE_BYTES;
 		self.models.reserve(models.len());
-		for model in models.drain(..) {
+		for model in models.into_iter() {
 			instance_count+=model.instances.len();
 			for instances_chunk in model.instances.rchunks(chunk_size){
 				model_count+=1;
diff --git a/src/primitives.rs b/src/primitives.rs
index bdda6cb..6f0e607 100644
--- a/src/primitives.rs
+++ b/src/primitives.rs
@@ -206,7 +206,7 @@ pub fn generate_partial_unit_cube(face_descriptions:CubeFaceDescription)->crate:
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index
@@ -321,7 +321,7 @@ pub fn generate_partial_unit_wedge(face_descriptions:WedgeFaceDescription)->crat
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index
@@ -433,7 +433,7 @@ pub fn generate_partial_unit_cornerwedge(face_descriptions:CornerWedgeFaceDescri
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index