From 42a6235690c688e74d573976e08dc808e5f3d725 Mon Sep 17 00:00:00 2001
From: Floris Bruynooghe
Date: Wed, 22 Feb 2023 09:47:32 +0100
Subject: [PATCH] feat: Remove MAX_DATA_LIMIT (#780)

We used to limit the maximum amount of data that can be transferred,
both the total size and the size of a single blob. There is no real
benefit to this: we pass the data through the reader and do not buffer
it all at once. Avoiding filling up the user's disk space is not our
concern.
---
 src/get.rs | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/src/get.rs b/src/get.rs
index 96cd095589..8f52f7f6a2 100644
--- a/src/get.rs
+++ b/src/get.rs
@@ -15,7 +15,7 @@ use crate::protocol::{
 };
 use crate::tls::{self, Keypair, PeerId};
 use abao::decode::AsyncSliceDecoder;
-use anyhow::{anyhow, bail, ensure, Result};
+use anyhow::{anyhow, bail, Result};
 use bytes::BytesMut;
 use futures::Future;
 use postcard::experimental::max_size::MaxSize;
@@ -24,8 +24,6 @@ use tracing::{debug, error};
 
 pub use crate::util::Hash;
 
-const MAX_DATA_SIZE: u64 = 1024 * 1024 * 1024;
-
 /// Options for the client
 #[derive(Clone, Debug)]
 pub struct Options {
@@ -181,13 +179,6 @@ where
     match response.data {
         // server is sending over a collection of blobs
         Res::FoundCollection { total_blobs_size } => {
-            ensure!(
-                total_blobs_size <= MAX_DATA_SIZE,
-                "size too large: {} > {}",
-                total_blobs_size,
-                MAX_DATA_SIZE
-            );
-
             data_len = total_blobs_size;
 
             // read entire collection data into buffer
@@ -204,10 +195,6 @@ where
                 handle_blob_response(blob.hash, reader, &mut in_buffer).await?;
 
             let size = blob_reader.read_size().await?;
-            anyhow::ensure!(
-                size <= MAX_DATA_SIZE,
-                "size too large: {size} > {MAX_DATA_SIZE}"
-            );
             anyhow::ensure!(
                 size <= remaining_size,
                 "downloaded more than {total_blobs_size}"
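
The rationale in the commit message is that fetched data is streamed through the reader rather than buffered, which is what makes a global size cap unnecessary. Below is a minimal, hypothetical sketch of that pattern, not code from src/get.rs: a blob of known size is checked only against the per-collection remaining_size accounting that the patch keeps, then streamed straight to disk. The function name copy_blob, the out_path parameter, and the tokio-based file I/O are assumptions for illustration.

// Hypothetical illustration, not part of the patch: stream a blob of a
// known size to disk without holding it all in memory.
use std::path::Path;

use anyhow::ensure;
use tokio::io::AsyncRead;

async fn copy_blob<R: AsyncRead + Unpin>(
    mut blob_reader: R,
    size: u64,
    remaining_size: u64,
    out_path: &Path,
) -> anyhow::Result<()> {
    // The only check the patch keeps: a blob may not exceed what the
    // announced collection total still allows.
    ensure!(size <= remaining_size, "downloaded more than announced");

    // Stream straight to a file; memory use is bounded by tokio's internal
    // copy buffer rather than by the blob size, so no MAX_DATA_SIZE cap is
    // needed to keep memory in check.
    let mut out_file = tokio::fs::File::create(out_path).await?;
    tokio::io::copy(&mut blob_reader, &mut out_file).await?;
    Ok(())
}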