diff --git a/common/streams/src/stream_limit_by.rs b/common/streams/src/stream_limit_by.rs
index 599bbd545e31a..110c84aea354d 100644
--- a/common/streams/src/stream_limit_by.rs
+++ b/common/streams/src/stream_limit_by.rs
@@ -74,7 +74,6 @@ impl LimitByStream {
         let array = BooleanArray::from_data(ArrowType::Boolean, filter.into(), None);
         let chunk = block.clone().try_into()?;
         let chunk = arrow::compute::filter::filter_chunk(&chunk, &array)?;
-        //let chunk = chunk.into();
         Some(DataBlock::from_chunk(block.schema(), &chunk)).transpose()
     }
 }
diff --git a/query/src/storages/fuse/io/block_reader.rs b/query/src/storages/fuse/io/block_reader.rs
index ae42d614a090c..4aea7841df49e 100644
--- a/query/src/storages/fuse/io/block_reader.rs
+++ b/query/src/storages/fuse/io/block_reader.rs
@@ -26,7 +26,6 @@ use common_tracing::tracing;
 use common_tracing::tracing::debug_span;
 use common_tracing::tracing::Instrument;
 use futures::future::BoxFuture;
-use futures::io::BufReader;
 use opendal::Operator;

 use crate::storages::fuse::io::meta_readers::BlockMetaReader;
@@ -38,7 +37,6 @@ pub struct BlockReader {
     arrow_table_schema: ArrowSchema,
     projection: Vec<usize>,
     file_len: u64,
-    read_buffer_size: u64,
     metadata_reader: BlockMetaReader,
 }

@@ -49,7 +47,6 @@ impl BlockReader {
         table_schema: DataSchemaRef,
         projection: Vec<usize>,
         file_len: u64,
-        read_buffer_size: u64,
         reader: BlockMetaReader,
     ) -> Self {
         let block_schema = Arc::new(table_schema.project(projection.clone()));
@@ -61,7 +58,6 @@ impl BlockReader {
             arrow_table_schema,
             projection,
             file_len,
-            read_buffer_size,
             metadata_reader: reader,
         }
     }
@@ -84,7 +80,6 @@ impl BlockReader {

         let arrow_fields = &self.arrow_table_schema.fields;
         let stream_len = self.file_len;
-        let read_buffer_size = self.read_buffer_size;
         let parquet_fields = metadata.schema().fields();

         // read_columns_many_async use field name to filter columns
@@ -105,12 +100,10 @@ impl BlockReader {
             let data_accessor = self.data_accessor.clone();
             let path = self.path.clone();
             Box::pin(async move {
-                let reader = data_accessor
+                Ok(data_accessor
                     .object(path.as_str())
                     .reader()
-                    .total_size(stream_len);
-                let reader = BufReader::with_capacity(read_buffer_size as usize, reader);
-                Ok(reader)
+                    .total_size(stream_len))
             }) as BoxFuture<_>
         };

diff --git a/query/src/storages/fuse/operations/read.rs b/query/src/storages/fuse/operations/read.rs
index cb4a5fe8b6dac..147eff5cc8af2 100644
--- a/query/src/storages/fuse/operations/read.rs
+++ b/query/src/storages/fuse/operations/read.rs
@@ -63,7 +63,6 @@ impl FuseTable {

         let part_stream = futures::stream::iter(iter);

-        let read_buffer_size = ctx.get_settings().get_storage_read_buffer_size()?;
         let stream = part_stream
             .map(move |part| {
                 let da = operator.clone();
@@ -81,7 +80,6 @@ impl FuseTable {
                     table_schema,
                     projection,
                     part_len,
-                    read_buffer_size,
                     reader,
                 );
                 block_reader.read().await.map_err(|e| {
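
For context, a minimal standalone sketch of the shape of the block_reader.rs change: the reader-factory future used to wrap the raw object reader in a futures::io::BufReader sized by the storage_read_buffer_size setting, and now it returns the raw reader directly. The opendal call chain (data_accessor.object(path).reader().total_size(stream_len)) is replaced below by a hypothetical make_raw_reader helper backed by an in-memory Cursor so the snippet compiles on its own with only the futures crate; it is an illustration of the pattern, not the crate's actual API.

use futures::future::BoxFuture;
use futures::io::{AsyncReadExt, BufReader, Cursor};

// Hypothetical stand-in for `data_accessor.object(path).reader().total_size(stream_len)`:
// any async reader over the block's bytes.
fn make_raw_reader(bytes: Vec<u8>) -> Cursor<Vec<u8>> {
    Cursor::new(bytes)
}

// Before this patch: the factory wrapped the raw reader in a BufReader whose
// capacity came from the `storage_read_buffer_size` setting.
fn reader_factory_buffered(
    bytes: Vec<u8>,
    read_buffer_size: usize,
) -> BoxFuture<'static, std::io::Result<BufReader<Cursor<Vec<u8>>>>> {
    Box::pin(async move {
        let reader = make_raw_reader(bytes);
        Ok(BufReader::with_capacity(read_buffer_size, reader))
    })
}

// After this patch: the factory hands the raw reader back directly, so the
// setting, the struct field, and the constructor argument all disappear.
fn reader_factory(bytes: Vec<u8>) -> BoxFuture<'static, std::io::Result<Cursor<Vec<u8>>>> {
    Box::pin(async move { Ok(make_raw_reader(bytes)) })
}

fn main() -> std::io::Result<()> {
    futures::executor::block_on(async {
        let mut buf = Vec::new();
        // Callers read through either factory the same way; dropping the
        // BufReader just removes one intermediate buffering layer.
        reader_factory(b"block bytes".to_vec()).await?.read_to_end(&mut buf).await?;
        reader_factory_buffered(b"block bytes".to_vec(), 1024).await?.read_to_end(&mut buf).await?;
        Ok(())
    })
}

As far as the diff shows, the net effect is one less buffering/copy layer between opendal and the read_columns_many_async column readers, and one less tunable setting threaded from FuseTable::read through BlockReader::new.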