feature: ensure we are not being lied to about total_blobs_size
Previously, if somebody set total_blobs_size to e.g. 1000 but then sent us a
huge collection, we would download all of it. Now the download fails as soon
as total_blobs_size is exceeded.
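
As a minimal standalone sketch of the check (`check_sizes` and the
`blob_sizes` slice are hypothetical stand-ins for the streaming logic in
src/get.rs, which reads one size at a time off the wire):

```rust
use anyhow::ensure;

/// Sketch of the accounting this commit adds. `claimed_total` is the
/// peer-supplied total_blobs_size; `blob_sizes` stands in for the per-blob
/// sizes read off the wire (hypothetical helper, not the real API).
fn check_sizes(claimed_total: u64, blob_sizes: &[u64]) -> anyhow::Result<()> {
    let mut remaining = claimed_total;
    for &size in blob_sizes {
        // Abort as soon as the running total exceeds the claimed total,
        // instead of downloading the whole collection first.
        ensure!(size <= remaining, "downloaded more than {claimed_total}");
        remaining -= size;
    }
    Ok(())
}
```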
rklaehn committed Feb 2, 2023
1 parent 6658d88 commit 6d66fc5
Showing 1 changed file with 14 additions and 7 deletions.
src/get.rs: 21 changes (14 additions & 7 deletions)
```diff
@@ -145,9 +145,21 @@ where
     on_collection(collection.clone()).await?;
 
     // expect to get blob data in the order they appear in the collection
+    let mut remaining_size = total_blobs_size;
     for blob in collection.blobs {
-        let blob_reader =
+        let mut blob_reader =
             handle_blob_response(blob.hash, reader, &mut in_buffer).await?;
+
+        let size = blob_reader.read_size().await?;
+        anyhow::ensure!(
+            size <= MAX_DATA_SIZE,
+            "size too large: {size} > {MAX_DATA_SIZE}"
+        );
+        anyhow::ensure!(
+            size <= remaining_size,
+            "downloaded more than {total_blobs_size}"
+        );
+        remaining_size -= size;
         let blob_reader =
             on_blob(blob.hash, blob_reader, Some(blob.name)).await?;
         reader = blob_reader.into_inner();
```
```diff
@@ -215,12 +227,7 @@ async fn handle_blob_response<
         // next blob in collection will be sent over
         Res::Found => {
             assert!(buffer.is_empty());
-            let mut decoder = AsyncSliceDecoder::new(reader, hash, 0, u64::MAX);
-            let size = decoder.read_size().await?;
-            anyhow::ensure!(
-                size <= MAX_DATA_SIZE,
-                "size too large: {size} > {MAX_DATA_SIZE}"
-            );
+            let decoder = AsyncSliceDecoder::new(reader, hash, 0, u64::MAX);
             Ok(decoder)
         }
     }
```
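
Note that the per-blob MAX_DATA_SIZE check moves out of handle_blob_response
and into the download loop, where the size is now read anyway, so both checks
sit next to each other. For illustration, the scenario from the commit
message, run through the `check_sizes` sketch above (values hypothetical):

```rust
// A peer claims total_blobs_size = 1000, then streams a 4096-byte blob.
// The download now fails at that blob instead of fetching the whole
// collection first.
let result = check_sizes(1000, &[512, 4096]);
assert!(result.is_err()); // "downloaded more than 1000"
```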
