diff --git a/examples/parquet_read_async.rs b/examples/parquet_read_async.rs index 2428b368869..3f1abc4faf6 100644 --- a/examples/parquet_read_async.rs +++ b/examples/parquet_read_async.rs @@ -43,7 +43,7 @@ async fn main() -> Result<()> { // the runtime. // Furthermore, this operation is trivially paralellizable e.g. via rayon, as each iterator // can be advanced in parallel (parallel decompression and deserialization). - let chunks = RowGroupDeserializer::new(column_chunks, row_group.num_rows() as usize, None); + let chunks = RowGroupDeserializer::new(column_chunks, row_group.num_rows(), None); for maybe_chunk in chunks { let chunk = maybe_chunk?; println!("{}", chunk.len()); diff --git a/examples/s3/src/main.rs b/examples/s3/src/main.rs index 5aa24431dd9..d3f668d4421 100644 --- a/examples/s3/src/main.rs +++ b/examples/s3/src/main.rs @@ -71,7 +71,7 @@ async fn main() -> Result<()> { // this is CPU-bounded and should be sent to a separate thread-pool. // We do it here for simplicity - let chunks = read::RowGroupDeserializer::new(column_chunks, group.num_rows() as usize, None); + let chunks = read::RowGroupDeserializer::new(column_chunks, group.num_rows(), None); let chunks = chunks.collect::<Result<Vec<_>>>()?; // this is a single chunk because chunk_size is `None` diff --git a/src/io/parquet/read/row_group.rs b/src/io/parquet/read/row_group.rs index 4dde21e467d..59fba886a65 100644 --- a/src/io/parquet/read/row_group.rs +++ b/src/io/parquet/read/row_group.rs @@ -260,8 +260,6 @@ pub async fn read_columns_many_async< field_columns .into_iter() .zip(fields.into_iter()) - .map(|(columns, field)| { - to_deserializer(columns, field, row_group.num_rows() as usize, chunk_size) - }) + .map(|(columns, field)| to_deserializer(columns, field, row_group.num_rows(), chunk_size)) .collect() } diff --git a/tests/it/io/parquet/read_indexes.rs b/tests/it/io/parquet/read_indexes.rs index 68f09bae804..328a826eaf0 100644 --- a/tests/it/io/parquet/read_indexes.rs +++ 
b/tests/it/io/parquet/read_indexes.rs @@ -125,7 +125,7 @@ fn read_with_indexes( vec![&c1.descriptor().descriptor.primitive_type], schema.fields[1].clone(), None, - row_group.num_rows() as usize, + row_group.num_rows(), )?; let arrays = arrays.collect::<Result<Vec<Box<dyn Array>>>>()?; diff --git a/tests/it/io/parquet/write_async.rs b/tests/it/io/parquet/write_async.rs index 5644caf67f5..000c39ca64c 100644 --- a/tests/it/io/parquet/write_async.rs +++ b/tests/it/io/parquet/write_async.rs @@ -63,7 +63,7 @@ async fn test_parquet_async_roundtrip() { let column_chunks = read_columns_many_async(factory, group, schema.fields.clone(), None) .await .unwrap(); - let chunks = RowGroupDeserializer::new(column_chunks, group.num_rows() as usize, None); + let chunks = RowGroupDeserializer::new(column_chunks, group.num_rows(), None); let mut chunks = chunks.collect::<Result<Vec<_>>>().unwrap(); out.append(&mut chunks); }