Commit

avoid extra byte in zio::Writer
fafhrd91 committed Jul 26, 2018
1 parent c2cf3cf commit d916363
Showing 3 changed files with 36 additions and 41 deletions.
9 changes: 3 additions & 6 deletions src/gz/write.rs
@@ -313,12 +313,9 @@ impl<W: Write> GzDecoder<W> {
     }
 
     fn write_buf(&mut self, buf: &[u8]) -> io::Result<usize> {
-        let n = if let Some(Status::StreamEnd) = self.inner.op_status() {
-            0
-        } else {
-            try!(self.inner.write(buf))
-        };
-        if let Some(Status::StreamEnd) = self.inner.op_status() {
+        let (n, status) = try!(self.inner.write_with_status(buf));
+
+        if status == Status::StreamEnd {
             if n < buf.len() && self.crc_bytes.len() < 8 {
                 let d = cmp::min(buf.len(), n + 8 - self.crc_bytes.len());
                 self.crc_bytes.extend(&buf[n..d]);
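
Note on the change above: the old write_buf wrote through the inner writer and then consulted a cached op_status(), which could reflect an earlier call; the new write_with_status reports the byte count and the stream status for the same call, so write_buf can stop at the end of the deflate stream and route the remaining input into crc_bytes (capped at 8 bytes). Below is a minimal standalone sketch of that return-status-with-count pattern, using hypothetical MockWriter and Status types rather than flate2's internals:

// Sketch only: hypothetical types illustrating the (bytes, status) return
// shape; they are not flate2's Writer or Status.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Status {
    Ok,
    StreamEnd,
}

struct MockWriter {
    consumed: usize,
    stream_len: usize, // pretend the logical stream ends after this many bytes
}

impl MockWriter {
    fn write_with_status(&mut self, buf: &[u8]) -> std::io::Result<(usize, Status)> {
        // Consume input only up to the end of the logical stream and report
        // the status observed for *this* call.
        let remaining = self.stream_len - self.consumed;
        let n = buf.len().min(remaining);
        self.consumed += n;
        let status = if self.consumed == self.stream_len {
            Status::StreamEnd
        } else {
            Status::Ok
        };
        Ok((n, status))
    }
}

fn main() -> std::io::Result<()> {
    let mut w = MockWriter { consumed: 0, stream_len: 5 };
    // Only the first 5 of these 11 bytes belong to the stream; the caller
    // learns that in the same call instead of checking a stale status later.
    let (n, status) = w.write_with_status(b"hello world")?;
    assert_eq!((n, status), (5, Status::StreamEnd));
    Ok(())
}
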
14 changes: 7 additions & 7 deletions src/mem.rs
@@ -6,8 +6,8 @@ use std::slice;
 
 use libc::{c_int, c_uint};
 
-use Compression;
 use ffi;
+use Compression;
 
 /// Raw in-memory compression stream for blocks of data.
 ///
@@ -104,7 +104,8 @@ pub enum FlushCompress {
     /// data has yet to be processed.
     Finish = ffi::MZ_FINISH as isize,
 
-    #[doc(hidden)] _Nonexhaustive,
+    #[doc(hidden)]
+    _Nonexhaustive,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -131,7 +132,8 @@ pub enum FlushDecompress {
     /// data has yet to be processed.
     Finish = ffi::MZ_FINISH as isize,
 
-    #[doc(hidden)] _Nonexhaustive,
+    #[doc(hidden)]
+    _Nonexhaustive,
 }
 
 /// The inner state for an error when decompressing
@@ -142,7 +144,7 @@ struct DecompressErrorInner {
 
 /// Error returned when a decompression object finds that the input stream of
 /// bytes was not a valid input stream of bytes.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct DecompressError(DecompressErrorInner);
 
 impl DecompressError {
@@ -276,9 +278,7 @@ impl Compress {
     pub fn set_level(&mut self, level: Compression) -> Result<(), CompressError> {
         let stream = &mut *self.inner.stream_wrapper;
 
-        let rc = unsafe {
-            ffi::deflateParams(stream, level.0 as c_int, ffi::MZ_DEFAULT_STRATEGY)
-        };
+        let rc = unsafe { ffi::deflateParams(stream, level.0 as c_int, ffi::MZ_DEFAULT_STRATEGY) };
 
         match rc {
             ffi::MZ_OK => Ok(()),
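
Most of the mem.rs changes are formatting-only (rustfmt style); the functional change is that DecompressError now derives PartialEq, so error values can be compared directly, for example in tests. A small usage sketch against flate2's public Decompress API (the input bytes here are illustrative, not taken from this commit):

// Sketch only: exercises the PartialEq derive on DecompressError.
extern crate flate2;

use flate2::{Decompress, FlushDecompress};

fn main() {
    // Bytes that are not a valid zlib stream produce a DecompressError.
    let mut d1 = Decompress::new(true); // `true` = expect a zlib header
    let mut out1 = Vec::with_capacity(64);
    let err1 = d1
        .decompress_vec(b"definitely not zlib", &mut out1, FlushDecompress::Finish)
        .unwrap_err();

    let mut d2 = Decompress::new(true);
    let mut out2 = Vec::with_capacity(64);
    let err2 = d2
        .decompress_vec(b"definitely not zlib", &mut out2, FlushDecompress::Finish)
        .unwrap_err();

    // With `#[derive(Debug, PartialEq)]`, the errors can be compared with `==`
    // (previously they could only be Debug-formatted).
    if err1 == err2 {
        println!("both inputs failed with equal errors: {:?}", err1);
    }
}
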
54 changes: 26 additions & 28 deletions src/zio.rs
@@ -1,5 +1,5 @@
-use std::io::prelude::*;
 use std::io;
+use std::io::prelude::*;
 use std::mem;
 
 use {Compress, Decompress, DecompressError, FlushCompress, FlushDecompress, Status};
@@ -9,7 +9,6 @@ pub struct Writer<W: Write, D: Ops> {
     obj: Option<W>,
     pub data: D,
     buf: Vec<u8>,
-    op_status: Option<Status>,
 }
 
 pub trait Ops {
@@ -163,7 +162,6 @@ impl<W: Write, D: Ops> Writer<W, D> {
             obj: Some(w),
             data: d,
            buf: Vec::with_capacity(32 * 1024),
-            op_status: None,
         }
     }
 
@@ -204,27 +202,8 @@ impl<W: Write, D: Ops> Writer<W, D> {
         self.obj.is_some()
     }
 
-    // Status of last Ops operation
-    pub fn op_status(&self) -> Option<Status> {
-        self.op_status
-    }
-
-    fn dump(&mut self) -> io::Result<()> {
-        // TODO: should manage this buffer not with `drain` but probably more of
-        // a deque-like strategy.
-        while self.buf.len() > 0 {
-            let n = self.obj.as_mut().unwrap().write(&self.buf)?;
-            if n == 0 {
-                return Err(io::ErrorKind::WriteZero.into());
-            }
-            self.buf.drain(..n);
-        }
-        Ok(())
-    }
-}
-
-impl<W: Write, D: Ops> Write for Writer<W, D> {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+    // Returns total written bytes and status of underlying codec
+    pub fn write_with_status(&mut self, buf: &[u8]) -> io::Result<(usize, Status)> {
         // miniz isn't guaranteed to actually write any of the buffer provided,
         // it may be in a flushing mode where it's just giving us data before
         // we're actually giving it any data. We don't want to spuriously return
@@ -238,13 +217,13 @@ impl<W: Write, D: Ops> Write for Writer<W, D> {
             let ret = self.data.run_vec(buf, &mut self.buf, D::Flush::none());
             let written = (self.data.total_in() - before_in) as usize;
 
-            if buf.len() > 0 && written == 0 && ret.is_ok() {
+            if buf.len() > 0 && written == 0 && ret.is_ok() && ret != Ok(Status::StreamEnd) {
                 continue;
            }
-            self.op_status = if let Ok(st) = ret { Some(st) } else { None };
             return match ret {
-                Ok(Status::Ok) | Ok(Status::BufError) | Ok(Status::StreamEnd) => Ok(written),
-
+                Ok(st) => match st {
+                    Status::Ok | Status::BufError | Status::StreamEnd => Ok((written, st)),
+                },
                 Err(..) => Err(io::Error::new(
                     io::ErrorKind::InvalidInput,
                     "corrupt deflate stream",
@@ -253,6 +232,25 @@
         }
     }
 
+    fn dump(&mut self) -> io::Result<()> {
+        // TODO: should manage this buffer not with `drain` but probably more of
+        // a deque-like strategy.
+        while self.buf.len() > 0 {
+            let n = try!(self.obj.as_mut().unwrap().write(&self.buf));
+            if n == 0 {
+                return Err(io::ErrorKind::WriteZero.into());
+            }
+            self.buf.drain(..n);
+        }
+        Ok(())
+    }
+}
+
+impl<W: Write, D: Ops> Write for Writer<W, D> {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.write_with_status(buf).map(|res| res.0)
+    }
+
     fn flush(&mut self) -> io::Result<()> {
         self.data
             .run_vec(&[], &mut self.buf, D::Flush::sync())
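
Taken together, the zio.rs change removes the cached op_status field and accessor, adds write_with_status returning the bytes consumed together with the codec Status, and reduces the Write::write impl to a thin wrapper that drops the status. A quick round trip through the public write-side gzip types exercises this path end to end (a usage sketch, not part of this commit):

// Sketch only: public-API round trip; GzDecoder::write_buf drives
// zio::Writer::write_with_status internally.
extern crate flate2;

use std::io::Write;

use flate2::write::{GzDecoder, GzEncoder};
use flate2::Compression;

fn main() -> std::io::Result<()> {
    // Compress on the write side.
    let mut enc = GzEncoder::new(Vec::new(), Compression::default());
    enc.write_all(b"hello world")?;
    let gz = enc.finish()?;

    // Decompress on the write side; the trailing gzip CRC bytes are handled
    // after the deflate stream reports StreamEnd.
    let mut dec = GzDecoder::new(Vec::new());
    dec.write_all(&gz)?;
    let plain = dec.finish()?;

    assert_eq!(plain, b"hello world".to_vec());
    Ok(())
}

Keeping Write::write as a one-line delegation preserves the std::io::Write contract while letting callers inside the crate, such as gz::write::GzDecoder, observe StreamEnd directly.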
