Commit 2a550e8

fix: remove LZO compression option and related test cases
1 parent: bceef41

2 files changed: 1 addition & 11 deletions

python/datafusion/dataframe.py

Lines changed: 1 addition & 1 deletion
@@ -699,10 +699,10 @@ def write_parquet(
                 - "snappy": Snappy compression.
                 - "gzip": Gzip compression.
                 - "brotli": Brotli compression.
-                - "lzo": LZO compression.
                 - "lz4": LZ4 compression.
                 - "lz4_raw": LZ4_RAW compression.
                 - "zstd": Zstandard compression.
+                Note: LZO is not yet implemented in arrow-rs and is therefore excluded.
             compression_level: Compression level to use. For ZSTD, the
                 recommended range is 1 to 22, with the default being 4. Higher levels
                 provide better compression but slower speed.
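
After this change, the codecs listed in the docstring are snappy, gzip, brotli, lz4, lz4_raw, and zstd. A minimal usage sketch, assuming a datafusion SessionContext and only the write_parquet parameters documented in the hunk above; the query and output path are illustrative:

from datafusion import SessionContext

ctx = SessionContext()
df = ctx.sql("SELECT 1 AS id, 'abc' AS payload")  # illustrative query

# Any codec still listed in the docstring works; "lzo" is intentionally absent.
df.write_parquet(
    "/tmp/example.parquet",  # illustrative output path
    compression="zstd",      # one of the documented codecs
    compression_level=4,     # ZSTD default per the docstring above
)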

python/tests/test_dataframe.py

Lines changed: 0 additions & 10 deletions
@@ -1126,16 +1126,6 @@ def test_write_compressed_parquet_default_compression_level(df, tmp_path, compre
     df.write_parquet(str(path), compression=compression)


-# lzo is not a valid Compression yet
-# https://github.com/apache/arrow-rs/issues/6970
-# Test write_parquet with lzo compression, should raise an error
-def test_write_compressed_parquet_lzo(df, tmp_path):
-    path = tmp_path / "test.parquet"
-
-    with pytest.raises(ValueError, match="lzo is not a valid Compression"):
-        df.write_parquet(str(path), compression="lzo")
-
-
 def test_dataframe_export(df) -> None:
     # Guarantees that we have the canonical implementation
     # reading our dataframe export
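
With the lzo-specific test gone, coverage for the remaining codecs comes from the parametrized test referenced in the hunk header. A hedged sketch of what such a test can look like; the test name and codec list are assumptions drawn from the docstring above, while the df and tmp_path fixtures mirror the deleted snippet:

import pytest


# Hypothetical test name; codecs taken from the write_parquet docstring above.
@pytest.mark.parametrize(
    "compression", ["snappy", "gzip", "brotli", "lz4", "lz4_raw", "zstd"]
)
def test_write_parquet_supported_codecs(df, tmp_path, compression):
    path = tmp_path / "test.parquet"
    # Each documented codec should write successfully now that "lzo" is gone.
    df.write_parquet(str(path), compression=compression)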
