443 changes: 206 additions & 237 deletions native/Cargo.lock

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions native/Cargo.toml
@@ -38,9 +38,10 @@ arrow = { version = "57.3.0", features = ["prettyprint", "ffi", "chrono-tz"] }
async-trait = { version = "0.1" }
bytes = { version = "1.11.1" }
parquet = { version = "57.2.0", default-features = false, features = ["experimental"] }
datafusion = { version = "51.0.0", default-features = false, features = ["unicode_expressions", "crypto_expressions", "nested_expressions", "parquet"] }
datafusion-datasource = { version = "51.0.0" }
datafusion-spark = { version = "51.0.0" }
datafusion = { version = "52.0.0", default-features = false, features = ["unicode_expressions", "crypto_expressions", "nested_expressions", "parquet"] }
datafusion-datasource = { version = "52.0.0" }
datafusion-spark = { version = "52.0.0" }
datafusion-physical-expr-adapter = { version = "52.0.0" }
datafusion-comet-spark-expr = { path = "spark-expr" }
datafusion-comet-proto = { path = "proto" }
chrono = { version = "0.4", default-features = false, features = ["clock"] }
3 changes: 2 additions & 1 deletion native/core/Cargo.toml
@@ -60,6 +60,7 @@ tempfile = "3.24.0"
itertools = "0.14.0"
paste = "1.0.14"
datafusion = { workspace = true, features = ["parquet_encryption", "sql"] }
datafusion-physical-expr-adapter = { workspace = true }
datafusion-datasource = { workspace = true }
datafusion-spark = { workspace = true }
once_cell = "1.18.0"
@@ -95,7 +96,7 @@ jni = { version = "0.21", features = ["invocation"] }
lazy_static = "1.4"
assertables = "9"
hex = "0.4.3"
datafusion-functions-nested = { version = "51.0.0" }
datafusion-functions-nested = { version = "52.0.0" }

[features]
backtrace = ["datafusion/backtrace"]
4 changes: 2 additions & 2 deletions native/core/src/execution/expressions/subquery.rs
@@ -67,8 +67,8 @@ impl PhysicalExpr for Subquery {
self
}

fn fmt_sql(&self, _: &mut Formatter<'_>) -> std::fmt::Result {
unimplemented!()
fn fmt_sql(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
Display::fmt(self, f)
}

fn data_type(&self, _: &Schema) -> datafusion::common::Result<DataType> {
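The `fmt_sql` change replaces an `unimplemented!()` panic with delegation to the existing `Display` implementation, so SQL-oriented plan rendering falls back to the operator's normal display form. A minimal sketch of that delegation pattern, using a hypothetical struct rather than Comet's actual `Subquery` fields:

```rust
use std::fmt::{Display, Formatter};

// Hypothetical stand-in type; only the delegation pattern matters here.
struct Subquery {
    id: i64,
}

impl Display for Subquery {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Subquery [id: {}]", self.id)
    }
}

impl Subquery {
    // Same shape as the trait method in the diff: reuse Display instead of
    // panicking when the planner asks for a SQL-like rendering.
    fn fmt_sql(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Display::fmt(self, f)
    }
}
```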
62 changes: 32 additions & 30 deletions native/core/src/execution/operators/csv_scan.rs
@@ -16,64 +16,66 @@
// under the License.

use crate::execution::operators::ExecutionError;
use arrow::datatypes::{Field, SchemaRef};
use arrow::datatypes::SchemaRef;
use datafusion::common::config::CsvOptions as DFCsvOptions;
use datafusion::common::DataFusionError;
use datafusion::common::Result;
use datafusion::datasource::object_store::ObjectStoreUrl;
use datafusion::datasource::physical_plan::CsvSource;
use datafusion_comet_proto::spark_operator::CsvOptions;
use datafusion_datasource::file_groups::FileGroup;
use datafusion_datasource::file_scan_config::{FileScanConfig, FileScanConfigBuilder};
use datafusion_datasource::file_scan_config::FileScanConfigBuilder;
use datafusion_datasource::source::DataSourceExec;
use datafusion_datasource::PartitionedFile;
use itertools::Itertools;
use std::sync::Arc;

pub fn init_csv_datasource_exec(
object_store_url: ObjectStoreUrl,
file_groups: Vec<Vec<PartitionedFile>>,
data_schema: SchemaRef,
partition_schema: SchemaRef,
_partition_schema: SchemaRef,
projection_vector: Vec<usize>,
csv_options: &CsvOptions,
) -> Result<Arc<DataSourceExec>, ExecutionError> {
let csv_source = build_csv_source(csv_options.clone());
let csv_source = build_csv_source(data_schema, csv_options)?;

let file_groups = file_groups
.iter()
.map(|files| FileGroup::new(files.clone()))
.collect();

let partition_fields = partition_schema
.fields()
.iter()
.map(|field| Field::new(field.name(), field.data_type().clone(), field.is_nullable()))
.collect_vec();

let file_scan_config: FileScanConfig =
FileScanConfigBuilder::new(object_store_url, data_schema, csv_source)
.with_file_groups(file_groups)
.with_table_partition_cols(partition_fields)
.with_projection_indices(Some(projection_vector))
.build();
let file_scan_config = FileScanConfigBuilder::new(object_store_url, csv_source)
.with_file_groups(file_groups)
.with_projection_indices(Some(projection_vector))?
.build();

Ok(Arc::new(DataSourceExec::new(Arc::new(file_scan_config))))
Ok(DataSourceExec::from_data_source(file_scan_config))
}

fn build_csv_source(options: CsvOptions) -> Arc<CsvSource> {
let delimiter = string_to_u8(&options.delimiter, "delimiter").unwrap();
let quote = string_to_u8(&options.quote, "quote").unwrap();
let escape = string_to_u8(&options.escape, "escape").unwrap();
let terminator = string_to_u8(&options.terminator, "terminator").unwrap();
fn build_csv_source(schema: SchemaRef, options: &CsvOptions) -> Result<Arc<CsvSource>> {
let delimiter = string_to_u8(&options.delimiter, "delimiter")?;
let quote = string_to_u8(&options.quote, "quote")?;
let escape = string_to_u8(&options.escape, "escape")?;
let terminator = string_to_u8(&options.terminator, "terminator")?;
let comment = options
.comment
.map(|c| string_to_u8(&c, "comment").unwrap());
let csv_source = CsvSource::new(options.has_header, delimiter, quote)
.with_escape(Some(escape))
.with_comment(comment)
.with_terminator(Some(terminator))
.with_truncate_rows(options.truncated_rows);
Arc::new(csv_source)
.as_ref()
.map(|c| string_to_u8(c, "comment"))
.transpose()?;

let df_csv_options = DFCsvOptions {
has_header: Some(options.has_header),
delimiter,
quote,
escape: Some(escape),
terminator: Some(terminator),
comment,
truncated_rows: Some(options.truncated_rows),
..Default::default()
};

let csv_source = CsvSource::new(schema).with_csv_options(df_csv_options);
Ok(Arc::new(csv_source))
}

fn string_to_u8(option: &str, option_name: &str) -> Result<u8> {
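With DataFusion 52, per-reader CSV settings move out of individual `with_*` builder calls on `CsvSource` and into a single `CsvOptions` value, and the source itself now carries the file schema. A minimal sketch of the new construction pattern, assuming the 52.0.0 API used in the diff above (the schema and option values are illustrative):

```rust
use std::sync::Arc;

use arrow::datatypes::{DataType, Field, Schema};
use datafusion::common::config::CsvOptions as DFCsvOptions;
use datafusion::datasource::physical_plan::CsvSource;

// Build a schema-aware CsvSource with reader options bundled in one struct,
// mirroring the migrated build_csv_source above.
fn example_csv_source() -> Arc<CsvSource> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int64, true),
        Field::new("name", DataType::Utf8, true),
    ]));

    let options = DFCsvOptions {
        has_header: Some(true),
        delimiter: b',',
        quote: b'"',
        escape: None,
        ..Default::default()
    };

    Arc::new(CsvSource::new(schema).with_csv_options(options))
}
```

The resulting source is then handed to `FileScanConfigBuilder::new(object_store_url, csv_source)`, which, as the diff shows, no longer takes the data schema as a separate argument.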
51 changes: 7 additions & 44 deletions native/core/src/execution/operators/iceberg_scan.rs
@@ -41,8 +41,7 @@ use iceberg::io::FileIO;

use crate::execution::operators::ExecutionError;
use crate::parquet::parquet_support::SparkParquetOptions;
use crate::parquet::schema_adapter::SparkSchemaAdapterFactory;
use datafusion::datasource::schema_adapter::{SchemaAdapterFactory, SchemaMapper};
use crate::parquet::schema_adapter::adapt_batch_with_expressions;
use datafusion_comet_spark_expr::EvalMode;
use iceberg::scan::FileScanTask;

@@ -169,16 +168,14 @@ impl IcebergScanExec {
})?;

let spark_options = SparkParquetOptions::new(EvalMode::Legacy, "UTC", false);
let adapter_factory = SparkSchemaAdapterFactory::new(spark_options, None);

let adapted_stream =
stream.map_err(|e| DataFusionError::Execution(format!("Iceberg scan error: {}", e)));

let wrapped_stream = IcebergStreamWrapper {
inner: adapted_stream,
schema: output_schema,
cached_adapter: None,
adapter_factory,
spark_options,
baseline_metrics: metrics.baseline,
};

@@ -221,15 +218,12 @@ impl IcebergScanMetrics {

/// Wrapper around iceberg-rust's stream that performs schema adaptation.
/// Handles batches from multiple files that may have different Arrow schemas
/// (metadata, field IDs, etc.). Caches schema adapters by source schema to avoid
/// recreating them for every batch from the same file.
/// (metadata, field IDs, etc.).
struct IcebergStreamWrapper<S> {
inner: S,
schema: SchemaRef,
/// Cached schema adapter with its source schema. Created when schema changes.
cached_adapter: Option<(SchemaRef, Arc<dyn SchemaMapper>)>,
/// Factory for creating schema adapters
adapter_factory: SparkSchemaAdapterFactory,
/// Spark parquet options for schema adaptation
spark_options: SparkParquetOptions,
/// Metrics for output tracking
baseline_metrics: BaselineMetrics,
}
@@ -245,40 +239,9 @@ where

let result = match poll_result {
Poll::Ready(Some(Ok(batch))) => {
let file_schema = batch.schema();

// Check if we need to create a new adapter for this file's schema
let needs_new_adapter = match &self.cached_adapter {
Some((cached_schema, _)) => !Arc::ptr_eq(cached_schema, &file_schema),
None => true,
};

if needs_new_adapter {
let adapter = self
.adapter_factory
.create(Arc::clone(&self.schema), Arc::clone(&file_schema));

match adapter.map_schema(file_schema.as_ref()) {
Ok((schema_mapper, _projection)) => {
self.cached_adapter = Some((file_schema, schema_mapper));
}
Err(e) => {
return Poll::Ready(Some(Err(DataFusionError::Execution(format!(
"Schema mapping failed: {}",
e
)))));
}
}
}

let result = self
.cached_adapter
.as_ref()
.expect("cached_adapter should be initialized")
.1
.map_batch(batch)
let result = adapt_batch_with_expressions(batch, &self.schema, &self.spark_options)
.map_err(|e| {
DataFusionError::Execution(format!("Batch mapping failed: {}", e))
DataFusionError::Execution(format!("Batch adaptation failed: {}", e))
});

Poll::Ready(Some(result))
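The stream wrapper no longer caches a `SchemaMapper` per source schema; each batch is adapted to the output schema through the expression-based `adapt_batch_with_expressions` helper. As a rough illustration of what per-batch schema adaptation does (this is not Comet's helper, which additionally applies Spark-specific conversion rules from `SparkParquetOptions`), here is a name-based cast sketch:

```rust
use std::sync::Arc;

use arrow::array::{ArrayRef, RecordBatch};
use arrow::compute::cast;
use arrow::datatypes::SchemaRef;
use arrow::error::ArrowError;

// Illustrative only: project the batch onto the target schema by matching
// columns by name and casting each one to the target field's data type.
fn adapt_batch(batch: &RecordBatch, target: &SchemaRef) -> Result<RecordBatch, ArrowError> {
    let columns: Vec<ArrayRef> = target
        .fields()
        .iter()
        .map(|field| {
            let source = batch.column_by_name(field.name()).ok_or_else(|| {
                ArrowError::SchemaError(format!("missing column {}", field.name()))
            })?;
            cast(source, field.data_type())
        })
        .collect::<Result<_, _>>()?;
    RecordBatch::try_new(Arc::clone(target), columns)
}
```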
9 changes: 7 additions & 2 deletions native/core/src/execution/operators/scan.rs
@@ -23,7 +23,7 @@ use crate::{
},
jvm_bridge::{jni_call, JVMClasses},
};
use arrow::array::{make_array, ArrayData, ArrayRef, RecordBatch, RecordBatchOptions};
use arrow::array::{make_array, Array, ArrayData, ArrayRef, RecordBatch, RecordBatchOptions};
use arrow::compute::{cast_with_options, take, CastOptions};
use arrow::datatypes::{DataType, Field, Schema, SchemaRef};
use arrow::ffi::FFI_ArrowArray;
@@ -94,6 +94,7 @@ impl ScanExec {

// Build schema directly from data types since get_next now always unpacks dictionaries
let schema = schema_from_data_types(&data_types);
// dbg!(&schema);

let cache = PlanProperties::new(
EquivalenceProperties::new(Arc::clone(&schema)),
@@ -209,6 +210,8 @@ impl ScanExec {

let array = make_array(array_data);

// dbg!(&array, &selection_indices_arrays);

// Apply selection if selection vectors exist (applies to all columns)
let array = if let Some(ref selection_arrays) = selection_indices_arrays {
let indices = &selection_arrays[i];
@@ -487,7 +490,7 @@ impl ScanStream<'_> {
) -> DataFusionResult<RecordBatch, DataFusionError> {
let schema_fields = self.schema.fields();
assert_eq!(columns.len(), schema_fields.len());

// dbg!(&columns, &self.schema);
// Cast dictionary-encoded primitive arrays to regular arrays and cast
// Utf8/LargeUtf8/Binary arrays to dictionary-encoded if the schema is
// defined as dictionary-encoded and the data in this batch is not
@@ -507,6 +510,7 @@ impl ScanStream<'_> {
})
.collect::<Result<Vec<_>, _>>()?;
let options = RecordBatchOptions::new().with_row_count(Some(num_rows));
// dbg!(&new_columns, &self.schema);
RecordBatch::try_new_with_options(Arc::clone(&self.schema), new_columns, &options)
.map_err(|e| arrow_datafusion_err!(e))
}
@@ -517,6 +521,7 @@ impl Stream for ScanStream<'_> {

fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut timer = self.baseline_metrics.elapsed_compute().timer();
// dbg!(&self.scan);
let mut scan_batch = self.scan.batch.try_lock().unwrap();

let input_batch = &*scan_batch;
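For reference, the selection-vector path in scan.rs keeps only the rows named by an index array, which is what Arrow's `take` kernel (imported at the top of the file) does. A minimal, self-contained sketch of that operation, with made-up values and indices:

```rust
use std::sync::Arc;

use arrow::array::{Array, ArrayRef, Int32Array, UInt32Array};
use arrow::compute::take;
use arrow::error::ArrowError;

// Keep only the rows referenced by `indices`, in that order.
fn apply_selection(column: &ArrayRef, indices: &UInt32Array) -> Result<ArrayRef, ArrowError> {
    take(column.as_ref(), indices, None)
}

fn main() -> Result<(), ArrowError> {
    let column: ArrayRef = Arc::new(Int32Array::from(vec![10, 20, 30, 40]));
    let indices = UInt32Array::from(vec![0u32, 2]);
    let selected = apply_selection(&column, &indices)?;
    assert_eq!(selected.len(), 2); // rows 0 and 2 survive the selection
    Ok(())
}
```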