From 41e084785df506ea1739f77a2e71f4af1c5e8378 Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Tue, 24 Feb 2026 14:01:29 +0000
Subject: [PATCH] Verify DeflateEncoder::with_buffer_size

Added `tests/buffer_size_test.rs` to verify that `DeflateEncoder` flushes
data when the internal buffer exceeds the configured size.

Fixed a bug in `src/compress/mod.rs` where `compress_loop` would return
`InsufficientSpace` for empty inputs with `FlushMode::Finish` if the
compressor wrote only bits (e.g., EOB marker) without advancing the byte
output index. The fix ensures that both byte index and bit count are
checked to detect progress.

This ensures reliable stream termination and flushing behavior for custom
buffer configurations.

Co-authored-by: 404Setup <153366651+404Setup@users.noreply.github.com>
---
 src/compress/mod.rs       |  3 +-
 tests/buffer_size_test.rs | 67 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 1 deletion(-)
 create mode 100644 tests/buffer_size_test.rs

diff --git a/src/compress/mod.rs b/src/compress/mod.rs
index 110221e..9b8cec4 100644
--- a/src/compress/mod.rs
+++ b/src/compress/mod.rs
@@ -649,12 +649,13 @@ impl Compressor {
         if in_idx == 0 && flush_mode == FlushMode::Finish {
             let start_out = bs.out_idx;
+            let start_bitcount = bs.bitcount;
             if self.compression_level >= 10 {
                 self.compress_near_optimal_block(mf, input, 0, bs, true);
             } else {
                 self.compress_greedy_block(mf, input, 0, bs, 0, true);
             }
-            if bs.out_idx == start_out {
+            if bs.out_idx == start_out && bs.bitcount == start_bitcount {
                 mf.advance(input.len());
                 return (CompressResult::InsufficientSpace, 0, 0);
             }
         }
diff --git a/tests/buffer_size_test.rs b/tests/buffer_size_test.rs
new file mode 100644
index 0000000..82a2c23
--- /dev/null
+++ b/tests/buffer_size_test.rs
@@ -0,0 +1,67 @@
+use libdeflate::stream::DeflateEncoder;
+use std::io::Write;
+use std::sync::{Arc, Mutex};
+
+#[derive(Clone)]
+struct SharedWriter {
+    data: Arc<Mutex<Vec<u8>>>,
+}
+
+impl Write for SharedWriter {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        let mut data = self.data.lock().unwrap();
+        data.extend_from_slice(buf);
+        Ok(buf.len())
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        Ok(())
+    }
+}
+
+#[test]
+fn test_with_buffer_size() {
+    let writer_data = Arc::new(Mutex::new(Vec::new()));
+    let writer = SharedWriter {
+        data: writer_data.clone(),
+    };
+
+    let buffer_size = 100;
+    // Buffer size 100, we write 150 bytes.
+    let mut encoder = DeflateEncoder::new(writer, 1).with_buffer_size(buffer_size);
+
+    let data = vec![0u8; 150];
+    encoder.write_all(&data).unwrap();
+
+    // The buffer size (100) is exceeded by 150 bytes, so flush_buffer(false) should be called.
+    // flush_buffer compresses and writes to the underlying writer.
+    let compressed_len = writer_data.lock().unwrap().len();
+    assert!(compressed_len > 0, "Encoder should have flushed when buffer limit was exceeded");
+
+    // Finish the stream
+    encoder.finish().unwrap();
+
+    let final_len = writer_data.lock().unwrap().len();
+    assert!(final_len > compressed_len, "Finish should write more data (footer/final block)");
+}
+
+#[test]
+fn test_default_buffer_size() {
+    let writer_data = Arc::new(Mutex::new(Vec::new()));
+    let writer = SharedWriter {
+        data: writer_data.clone(),
+    };
+
+    // Default buffer size is usually large (e.g. 1MB).
+    let mut encoder = DeflateEncoder::new(writer, 1);
+
+    let data = vec![0u8; 150];
+    encoder.write_all(&data).unwrap();
+
+    // Should not have flushed yet as 150 < default buffer size
+    let compressed_len = writer_data.lock().unwrap().len();
+    assert_eq!(compressed_len, 0, "Encoder should NOT have flushed with default buffer size");
+
+    encoder.finish().unwrap();
+    assert!(writer_data.lock().unwrap().len() > 0);
+}