Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 22 additions & 21 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ nix = { version = "0.29", features = ["ioctl"] }
# Enable vendored OpenSSL for cross-compilation to musl targets
# This ensures OpenSSL builds from source with musl compatibility
openssl = { version = "0.10", features = ["vendored"] }
xz2 = "0.1"
liblzma = { version = "0.4", features = ["parallel"] }

[dev-dependencies]
http-body = "1.0.1"
Expand Down
107 changes: 107 additions & 0 deletions src/fls/decompress.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,53 @@
use crate::fls::byte_channel::ByteBoundedReceiver;
use crate::fls::compression::Compression;
use crate::fls::stream_utils::ChannelReader;
use bytes::Bytes;
use std::io::Read;
use tokio::io::AsyncReadExt;
use tokio::process::{Child, Command};
use tokio::sync::mpsc;

/// Converts a size in mebibytes to bytes, saturating at `u64::MAX` instead of
/// overflowing for very large inputs.
pub(crate) fn mb_to_bytes(mb: u64) -> u64 {
    mb.saturating_mul(1 << 20)
}

/// Builds a single-threaded XZ decoder over `reader`.
///
/// The decoder's memory use is capped at `memlimit_mb` MiB; exceeding the cap
/// surfaces later as a read error. Returns a human-readable message if the
/// underlying lzma stream cannot be constructed.
pub(crate) fn create_xz_decoder<R: Read>(
    reader: R,
    memlimit_mb: u64,
) -> Result<liblzma::read::XzDecoder<R>, String> {
    let limit_bytes = mb_to_bytes(memlimit_mb);
    match liblzma::stream::Stream::new_stream_decoder(limit_bytes, 0) {
        Ok(stream) => Ok(liblzma::read::XzDecoder::new_stream(reader, stream)),
        Err(e) => Err(format!(
            "Failed to create XZ decoder with {}MB limit: {}",
            memlimit_mb, e
        )),
    }
}

/// Builds a multi-threaded XZ decoder over `reader`, returned as a boxed
/// `Read` so callers do not depend on the concrete decoder type.
///
/// Thread count follows the host's available parallelism (falling back to 2
/// when it cannot be determined). `xz_memlimit_mb` caps both the threading
/// memory limit and the hard-stop limit of the decoder.
pub(crate) fn create_mt_xz_decoder<R: Read + Send + 'static>(
    reader: R,
    xz_memlimit_mb: u64,
) -> Result<Box<dyn Read + Send>, String> {
    let threads = match std::thread::available_parallelism() {
        Ok(n) => n.get() as u32,
        Err(_) => 2,
    };
    let memlimit = mb_to_bytes(xz_memlimit_mb);
    eprintln!(
        "XZ decompression: {} threads, memory limit {}MB",
        threads, xz_memlimit_mb
    );
    let stream = liblzma::stream::MtStreamBuilder::new()
        .threads(threads)
        .memlimit_threading(memlimit)
        .memlimit_stop(memlimit)
        .decoder()
        .map_err(|e| format!("Failed to create MT XZ decoder: {}", e))?;
    let decoder = liblzma::read::XzDecoder::new_stream(reader, stream);
    Ok(Box::new(decoder))
}

/// Determines the appropriate decompression command based on URL extension
fn get_decompressor_command(url: &str) -> &'static str {
let extension = url.rsplit('.').next().unwrap_or("").to_lowercase();
Expand Down Expand Up @@ -75,6 +121,67 @@ fn spawn_decompressor(
Ok((process, cmd))
}

/// Infers the compression format from a URL's file extension.
///
/// Any query string (`?...`) or fragment (`#...`) is ignored so only the path
/// contributes an extension; unknown or missing extensions map to
/// `Compression::None`.
pub(crate) fn get_compression_from_url(url: &str) -> Compression {
    // Everything from the first '?' or '#' onward is not part of the path.
    let end = url.find(|c| c == '?' || c == '#').unwrap_or(url.len());
    let extension = url[..end].rsplit('.').next().unwrap_or("").to_lowercase();
    match extension.as_str() {
        "gz" => Compression::Gzip,
        "xz" => Compression::Xz,
        "zst" | "zstd" => Compression::Zstd,
        _ => Compression::None,
    }
}

type DecompressorResult = (
mpsc::Receiver<Vec<u8>>,
std::thread::JoinHandle<Result<(), String>>,
);

pub(crate) fn start_inprocess_decompressor(
buffer_rx: ByteBoundedReceiver<Bytes>,
compression: Compression,
consumed_progress_tx: mpsc::UnboundedSender<u64>,
xz_memlimit_mb: u64,
) -> Result<DecompressorResult, Box<dyn std::error::Error>> {
let (decompressed_tx, decompressed_rx) = mpsc::channel::<Vec<u8>>(8);
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't Vec a bit unbounded?

LLMs always hit us with their unbound usage of memory, we need to make sure that our memory consumption does not grow unbounded, for example if we have a small block that decompresses as a lot of 0's or similar (unlikely for simg, but likely for non-simg)


let handle = std::thread::Builder::new()
.name("decompressor".to_string())
.spawn(move || {
let channel_reader =
ChannelReader::new_byte_bounded(buffer_rx).with_progress(consumed_progress_tx);

let mut decoder: Box<dyn Read + Send> = match compression {
Compression::Xz => create_mt_xz_decoder(channel_reader, xz_memlimit_mb)?,
Compression::Gzip => Box::new(flate2::read::GzDecoder::new(channel_reader)),
Compression::None => Box::new(channel_reader),
Compression::Zstd => {
return Err("Zstd in-process decompression is not supported".to_string());
}
};

let mut buf = vec![0u8; 8 * 1024 * 1024];
loop {
let n = decoder
.read(&mut buf)
.map_err(|e| format!("Decompression error: {}", e))?;
if n == 0 {
break;
}
if decompressed_tx.blocking_send(buf[..n].to_vec()).is_err() {
return Err("Writer task closed, stopping decompression".to_string());
}
}
Ok(())
})
.map_err(|e| -> Box<dyn std::error::Error> {
format!("Failed to spawn decompressor thread: {}", e).into()
})?;

Ok((decompressed_rx, handle))
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.

pub(crate) async fn spawn_stderr_reader(
mut stderr: tokio::process::ChildStderr,
error_tx: mpsc::UnboundedSender<String>,
Expand Down
Loading
Loading