forked from StrafesNET/strafe-client
into_iter is probably better than drain
parent 6c2eb5ff29
commit d3e4918d3e
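In every hunk below, a Vec that is about to go out of scope was being emptied with drain(..); the loops now consume the collection directly with into_iter(). A minimal standalone sketch of the difference (the function names and the Vec<String> are placeholders, not code from this repository):

// Hypothetical standalone example; not code from this repository.
fn consume_with_drain(mut models: Vec<String>) {
    // drain(..) mutably borrows `models`, moves every element out,
    // and leaves an empty (but still allocated) Vec behind.
    for model in models.drain(..) {
        println!("{model}");
    }
    // `models` is still alive here, just empty.
    assert!(models.is_empty());
}

fn consume_with_into_iter(models: Vec<String>) {
    // into_iter() takes ownership of the whole Vec: no `mut` binding is
    // needed and nothing is left over to keep alive after the loop.
    for model in models.into_iter() {
        println!("{model}");
    }
    // `models` has been moved; it cannot be used past this point.
}

When the collection itself is not needed afterwards, into_iter() states that directly; drain(..) is meant for emptying a Vec you intend to keep and reuse.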
@@ -161,7 +161,7 @@ impl GraphicsData {
 	//the models received here are supposed to be tightly packed, i.e. no code needs to check if two models are using the same groups.
 	let indexed_models_len=indexed_models.models.len();
 	let mut unique_texture_models=Vec::with_capacity(indexed_models_len);
-	for mut model in indexed_models.models.drain(..){
+	for mut model in indexed_models.models.into_iter(){
 		//convert ModelInstance into ModelGraphicsInstance
 		let instances:Vec<ModelGraphicsInstance>=model.instances.iter().map(|instance|{
 			ModelGraphicsInstance{
@@ -200,7 +200,7 @@ impl GraphicsData {
 	}
 	//de-index models
 	let mut models=Vec::with_capacity(unique_texture_models.len());
-	for model in unique_texture_models.drain(..){
+	for model in unique_texture_models.into_iter(){
 		let mut vertices = Vec::new();
 		let mut index_from_vertex = std::collections::HashMap::new();//::<IndexedVertex,usize>
 		let mut entities = Vec::new();
@@ -243,7 +243,7 @@ impl GraphicsData {
 	let uniform_buffer_binding_size=<GraphicsData as framework::Example>::required_limits().max_uniform_buffer_binding_size as usize;
 	let chunk_size=uniform_buffer_binding_size/MODEL_BUFFER_SIZE_BYTES;
 	self.models.reserve(models.len());
-	for model in models.drain(..) {
+	for model in models.into_iter() {
 		instance_count+=model.instances.len();
 		for instances_chunk in model.instances.rchunks(chunk_size){
 			model_count+=1;
@@ -206,7 +206,7 @@ pub fn generate_partial_unit_cube(face_descriptions:CubeFaceDescription)->crate:
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index
@@ -321,7 +321,7 @@ pub fn generate_partial_unit_wedge(face_descriptions:WedgeFaceDescription)->crat
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index
@@ -433,7 +433,7 @@ pub fn generate_partial_unit_cornerwedge(face_descriptions:CornerWedgeFaceDescri
 	let mut groups=Vec::new();
 	let mut transforms=Vec::new();
 	//note that on a cube every vertex is guaranteed to be unique, so there's no need to hash them against existing vertices.
-	for (face,face_description) in face_descriptions.iter(){
+	for (face,face_description) in face_descriptions.into_iter(){
 		//assume that scanning short lists is faster than hashing.
 		let transform_index=if let Some(transform_index)=transforms.iter().position(|&transform|transform==face_description.transform){
 			transform_index
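The three primitive generators switch from iter() to into_iter() on face_descriptions, so each (face,face_description) pair is handed to the loop body by value instead of by reference. CubeFaceDescription and its iterators are not part of this diff, so the sketch below is only an illustration of that borrow-versus-consume distinction, with a plain Vec of pairs standing in for the crate's own types:

// Hypothetical sketch; a Vec of (face, description) pairs stands in for the
// crate's CubeFaceDescription, which is not shown in this diff.
fn demo() {
    let face_descriptions = vec![(0u32, "x+".to_string()), (1u32, "x-".to_string())];

    // .iter() borrows the collection: the pattern binds references
    // (face: &u32, face_description: &String) and the Vec outlives the loop.
    for (face, face_description) in face_descriptions.iter() {
        println!("{face}: {face_description}");
    }

    // .into_iter() consumes the collection: the pattern binds owned values,
    // so the body can move out of face_description without cloning, and the
    // Vec is gone once the loop ends.
    for (face, face_description) in face_descriptions.into_iter() {
        println!("{face}: {face_description}");
    }
}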