8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -177,7 +177,7 @@ opentelemetry_sdk = "0.31.0"
 parking_lot = { version = "0.12.3", features = ["nightly"] }
 parquet = "57.1"
 paste = "1.0.15"
-pco = "0.4.4"
+pco = "1.0.1"
 pin-project-lite = "0.2.15"
 primitive-types = { version = "0.14.0" }
 proc-macro2 = "1.0.95"
10 changes: 5 additions & 5 deletions encodings/pco/src/array.rs
@@ -297,10 +297,10 @@ impl PcoArray
         let mut chunk_infos = vec![]; // the Vortex metadata
         let mut page_buffers = vec![];
         for chunk_start in (0..n_values).step_by(values_per_chunk) {
-            let cc = match_number_enum!(
+            let chunk_end = cmp::min(n_values, chunk_start + values_per_chunk);
+            let mut cc = match_number_enum!(
                 number_type,
                 NumberType<T> => {
-                    let chunk_end = cmp::min(n_values, chunk_start + values_per_chunk);
                     let values = values.to_buffer::<T>();
                     let chunk = &values.as_slice()[chunk_start..chunk_end];
                     fc
@@ -309,8 +309,8 @@ impl PcoArray
                 }
             );

-            let mut chunk_meta_buffer = ByteBufferMut::with_capacity(cc.chunk_meta_size_hint());
-            cc.write_chunk_meta(&mut chunk_meta_buffer)
+            let mut chunk_meta_buffer = ByteBufferMut::with_capacity(cc.meta_size_hint());
+            cc.write_meta(&mut chunk_meta_buffer)
                 .map_err(vortex_err_from_pco)?;
             chunk_meta_buffers.push(chunk_meta_buffer.freeze());

@@ -424,7 +424,7 @@ impl PcoArray
                 .page_decompressor(page, page_n_values)
                 .map_err(vortex_err_from_pco)
                 .vortex_expect("page_decompressor should succeed with valid page data");
-            pd.decompress(&mut decompressed_values[old_len..new_len])
+            pd.read(&mut decompressed_values[old_len..new_len])
                 .map_err(vortex_err_from_pco)
                 .vortex_expect("decompress should succeed with valid compressed data");
         } else {
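The churn in this file is mostly mechanical: pco 1.0 drops the `chunk_` prefix from the wrapped API's metadata methods and renames page decompression from `decompress` to `read`. A minimal before/after sketch; `cc` and `pd` stand for the chunk compressor and page decompressor from the surrounding code, and only the renamed method names are confirmed by this diff (buffer slices are illustrative):

```rust
// pco 0.4 -> 1.0 renames exercised in this file (illustrative fragment, not a full program).

// Write path: `cc` is the chunk compressor returned by `fc.chunk_compressor(...)`.
let mut chunk_meta_buffer = ByteBufferMut::with_capacity(cc.meta_size_hint()); // 0.4: cc.chunk_meta_size_hint()
cc.write_meta(&mut chunk_meta_buffer)?;                                        // 0.4: cc.write_chunk_meta(...)

// Read path: `pd` is the page decompressor from `page_decompressor(page, page_n_values)`.
pd.read(&mut decompressed_values[old_len..new_len])?;                          // 0.4: pd.decompress(...)
```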
46 changes: 29 additions & 17 deletions vortex-layout/src/layouts/compact.rs
@@ -46,7 +46,8 @@ pub struct CompactCompressor
     /// nvCOMP though doesn't support ZSTD dictionaries. Therefore, we need the option to
     /// disable them for compatibility.
     zstd_use_dicts: bool,
-    zstd_values_per_page: usize,
+    pco_values_per_page: usize,
+    zstd_values_per_frame: usize,
 }

 impl CompactCompressor {
@@ -66,13 +67,22 @@ impl CompactCompressor
     }

     /// Sets the number of non-null primitive values to store per
-    /// separately-decompressible page/frame.
+    /// separately-decompressible Pco page.
     ///
-    /// Fewer values per page can reduce the time to query a small slice of rows, but too
-    /// few can increase compressed size and (de)compression time. The default is 0, which
-    /// is used for maximally-large pages.
-    pub fn with_zstd_values_per_page(mut self, values_per_page: usize) -> Self {
-        self.zstd_values_per_page = values_per_page;
+    /// Fewer values per page can reduce the time to query a small slice of rows, but too
+    /// few can increase compressed size and (de)compression time.
+    pub fn with_pco_values_per_page(mut self, values_per_page: usize) -> Self {
+        self.pco_values_per_page = values_per_page;
         self
     }
+
+    /// Sets the number of non-null primitive values to store per
+    /// separately-decompressible Zstd frame.
+    ///
+    /// Fewer values per frame can reduce the time to query a small slice of rows, but too
+    /// few can increase compressed size and (de)compression time.
+    pub fn with_zstd_values_per_frame(mut self, values_per_frame: usize) -> Self {
+        self.zstd_values_per_frame = values_per_frame;
+        self
+    }

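With the split, Pco page size and Zstd frame size are tuned independently. A usage sketch chaining the two new setters with the `compress` call shown in vortex/src/lib.rs below (array construction omitted; the values are this PR's defaults):

```rust
// Sketch: configure Pco pages and Zstd frames independently.
let compressed = CompactCompressor::default()
    .with_pco_values_per_page(2048)   // non-null values per separately-decompressible Pco page
    .with_zstd_values_per_frame(8192) // non-null values per separately-decompressible Zstd frame
    .compress(array.as_ref())?;
```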
@@ -93,21 +103,21 @@ impl CompactCompressor
                 let pco_array = PcoArray::from_primitive(
                     primitive,
                     self.pco_level,
-                    self.zstd_values_per_page,
+                    self.pco_values_per_page,
                 )?;
                 pco_array.into_array()
             } else {
                 let zstd_array = if self.zstd_use_dicts {
                     ZstdArray::from_primitive(
                         primitive,
                         self.zstd_level,
-                        self.zstd_values_per_page,
+                        self.zstd_values_per_frame,
                     )?
                 } else {
                     ZstdArray::from_primitive_without_dict(
                         primitive,
                         self.zstd_level,
-                        self.zstd_values_per_page,
+                        self.zstd_values_per_frame,
                     )?
                 };
                 zstd_array.into_array()
@@ -140,13 +150,13 @@ impl CompactCompressor
             Canonical::VarBinView(vbv) => {
                 // always zstd
                 if self.zstd_use_dicts {
-                    ZstdArray::from_var_bin_view(vbv, self.zstd_level, self.zstd_values_per_page)?
+                    ZstdArray::from_var_bin_view(vbv, self.zstd_level, self.zstd_values_per_frame)?
                         .into_array()
                 } else {
                     ZstdArray::from_var_bin_view_without_dict(
                         vbv,
                         self.zstd_level,
-                        self.zstd_values_per_page,
+                        self.zstd_values_per_frame,
                     )?
                     .into_array()
                 }
@@ -224,11 +234,13 @@ impl Default for CompactCompressor
             pco_level: pco::DEFAULT_COMPRESSION_LEVEL,
             zstd_level: 3,
             zstd_use_dicts: true,
-            // This is probably high enough to not hurt performance or
-            // compression. It also currently aligns with the default strategy's
-            // number of rows per statistic, which allows efficient pushdown
-            // (but nothing enforces this).
-            zstd_values_per_page: 8192,
+            // These per-page/per-frame defaults are probably high enough not to
+            // hurt performance or compression. They also currently divide the
+            // default strategy's number of rows per statistic, which allows
+            // efficient pushdown for scalar, non-nullable data (but nothing
+            // enforces this).
+            pco_values_per_page: 2048,
+            zstd_values_per_frame: 8192,
         }
     }
 }
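The comment on the defaults leans on simple divisibility: if the default strategy computes one statistic per 8192 rows (an assumption here; the comment only guarantees the defaults divide that count), page and frame boundaries can line up with statistics blocks. The arithmetic:

```rust
// Hypothetical rows-per-statistic; the comment above only promises the defaults divide it.
let rows_per_stat = 8192;
assert_eq!(rows_per_stat % 2048, 0); // 4 Pco pages per statistics block
assert_eq!(rows_per_stat % 8192, 0); // 1 Zstd frame per statistics block
```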
2 changes: 1 addition & 1 deletion vortex/src/lib.rs
@@ -258,7 +258,7 @@ mod test

         // Or apply generally stronger compression with the compact compressor
         let compressed = CompactCompressor::default()
-            .with_zstd_values_per_page(8192)
+            .with_zstd_values_per_frame(8192)
             .compress(array.as_ref())?;
         println!("Compact size: {} / {}", compressed.nbytes(), array.nbytes());
         // [compress]