@@ -34,7 +34,7 @@
 
 def read_parquet(
     path: str | pathlib.Path,
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     parquet_pruning: bool = True,
     file_extension: str = ".parquet",
     skip_metadata: bool = True,
@@ -83,7 +83,7 @@ def read_json(
     schema: pa.Schema | None = None,
     schema_infer_max_records: int = 1000,
     file_extension: str = ".json",
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
    file_compression_type: str | None = None,
 ) -> DataFrame:
     """Read a line-delimited JSON data source.
@@ -124,7 +124,7 @@ def read_csv(
     delimiter: str = ",",
     schema_infer_max_records: int = 1000,
     file_extension: str = ".csv",
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     file_compression_type: str | None = None,
 ) -> DataFrame:
     """Read a CSV data source.
@@ -171,7 +171,7 @@ def read_csv(
 def read_avro(
     path: str | pathlib.Path,
     schema: pa.Schema | None = None,
-    file_partition_cols: list[tuple[str, str]] | None = None,
+    file_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     file_extension: str = ".avro",
 ) -> DataFrame:
     """Create a :py:class:`DataFrame` for reading Avro data source.
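Taken together, these hunks widen the partition-column annotations so each reader accepts either a pyarrow DataType or a plain string type name for the second element of each tuple; read_avro keeps its distinct parameter name, file_partition_cols, but its element type now matches the other readers. Below is a minimal usage sketch, assuming these are the module-level helpers in datafusion.io; the dataset path, partition columns, and the string names "int" and "string" are illustrative placeholders, and the exact set of accepted type names is whatever the backend resolves.

# Minimal sketch of the widened annotation (hypothetical path and columns).
import pyarrow as pa

from datafusion.io import read_parquet

# Previously the partition type had to be a pyarrow DataType:
df = read_parquet(
    "data/events/",
    table_partition_cols=[("year", pa.int32()), ("country", pa.string())],
)

# With this change, a string type name is also accepted in the tuple
# (assuming the backend maps names such as "int" and "string" to Arrow types):
df = read_parquet(
    "data/events/",
    table_partition_cols=[("year", "int"), ("country", "string")],
)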