Skip to content

Commit 748031d

Browse files
Dharin-shah authored and evertlammerts committed
fix typo
1 parent b32e901 commit 748031d

File tree

1 file changed

+4
-14
lines changed

1 file changed

+4
-14
lines changed

tests/fast/spark/test_spark_dataframe.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -432,19 +432,20 @@ def test_dtypes(self, spark):
432432
data = [("Alice", 25, 5000.0), ("Bob", 30, 6000.0)]
433433
df = spark.createDataFrame(data, ["name", "age", "salary"])
434434
dtypes = df.dtypes
435+
435436
assert isinstance(dtypes, list)
436437
assert len(dtypes) == 3
437438
for col_name, col_type in dtypes:
438439
assert isinstance(col_name, str)
439440
assert isinstance(col_type, str)
441+
440442
col_names = [name for name, _ in dtypes]
441443
assert col_names == ["name", "age", "salary"]
442444
for _, col_type in dtypes:
443-
assert len(col_type) > 0 # Should have some type string
445+
assert len(col_type) > 0
444446

445447
def test_dtypes_complex_types(self, spark):
446448
from spark_namespace.sql.types import ArrayType, IntegerType, StringType, StructField, StructType
447-
448449
schema = StructType([
449450
StructField("name", StringType(), True),
450451
StructField("scores", ArrayType(IntegerType()), True),
@@ -453,12 +454,10 @@ def test_dtypes_complex_types(self, spark):
453454
StructField("zip", StringType(), True)
454455
]), True)
455456
])
456-
457457
data = [
458458
("Alice", [90, 85, 88], {"city": "NYC", "zip": "10001"}),
459459
("Bob", [75, 80, 82], {"city": "LA", "zip": "90001"})
460460
]
461-
462461
df = spark.createDataFrame(data, schema)
463462
dtypes = df.dtypes
464463

@@ -472,6 +471,7 @@ def test_printSchema(self, spark, capsys):
472471
df.printSchema()
473472
captured = capsys.readouterr()
474473
output = captured.out
474+
475475
assert "root" in output
476476
assert "name" in output
477477
assert "age" in output
@@ -480,9 +480,7 @@ def test_printSchema(self, spark, capsys):
480480
assert "int" in output.lower() or "bigint" in output.lower()
481481

482482
def test_printSchema_nested(self, spark, capsys):
483-
# Test printSchema with nested schema
484483
from spark_namespace.sql.types import ArrayType, IntegerType, StringType, StructField, StructType
485-
486484
schema = StructType([
487485
StructField("id", IntegerType(), True),
488486
StructField("person", StructType([
@@ -491,30 +489,22 @@ def test_printSchema_nested(self, spark, capsys):
491489
]), True),
492490
StructField("hobbies", ArrayType(StringType()), True)
493491
])
494-
495492
data = [
496493
(1, {"name": "Alice", "age": 25}, ["reading", "coding"]),
497494
(2, {"name": "Bob", "age": 30}, ["gaming", "music"])
498495
]
499-
500496
df = spark.createDataFrame(data, schema)
501-
502-
# Should not raise an error
503497
df.printSchema()
504-
505498
captured = capsys.readouterr()
506499
output = captured.out
507500

508-
# Verify nested structure is shown
509501
assert "root" in output
510502
assert "person" in output
511503
assert "hobbies" in output
512504

513505
def test_printSchema_negative_level(self, spark):
514-
# Test printSchema with invalid level parameter
515506
data = [("Alice", 25)]
516507
df = spark.createDataFrame(data, ["name", "age"])
517508

518-
# Should raise PySparkValueError for negative level
519509
with pytest.raises(PySparkValueError):
520510
df.printSchema(level=-1)

0 commit comments

Comments (0)