@@ -130,7 +130,7 @@ def test_spark_df_source(self):
             path_table,
             tile_dimensions=(512, 512),
             catalog_col_names=catalog_columns,
-            lazy_tiles=True # We'll get an OOM error if we try to read 9 scenes all at once!
+            lazy_tiles=True  # We'll get an OOM error if we try to read 9 scenes all at once!
         )
 
         self.assertTrue(len(path_df.columns) == 6)  # three bands times {path, tile}
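For context, `lazy_tiles=True` defers reading raster cells until a tile is actually evaluated, which is what the inline comment about OOM-ing on nine eager scene reads alludes to. Below is a minimal standalone sketch of the same read, assuming a RasterFrames-enabled SparkSession named `spark` plus a catalog DataFrame `path_table` and column-name list `catalog_columns` set up as in the test; nothing here is a new API, only names mirrored from the hunk above:

    # Sketch only: `spark`, `path_table`, and `catalog_columns` are assumed to
    # exist as in the test above (RasterFrames-enabled session, catalog
    # DataFrame, and its raster-path column names).
    path_df = spark.read.raster(
        path_table,
        tile_dimensions=(512, 512),
        catalog_col_names=catalog_columns,
        lazy_tiles=True,  # defer cell reads; reading all scenes eagerly can OOM
    )
    path_df.printSchema()  # expect one tile column per entry in catalog_columns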
@@ -170,11 +170,16 @@ def test_pandas_source(self):
 
     def test_csv_string(self):
 
-        s = f"""metadata,b1,b2
-        a,{self.path(1,1)},{self.path(1,2)}
-        b,{self.path(2,1)},{self.path(2,2)}
-        c,{self.path(3,1)},{self.path(3,2)}
-        """
+        s = """metadata,b1,b2
+        a,{},{}
+        b,{},{}
+        c,{},{}
+        """.format(
+            self.path(1, 1), self.path(1, 2),
+            self.path(2, 1), self.path(2, 2),
+            self.path(3, 1), self.path(3, 2),
+        )
+
         df = self.spark.read.raster(s, ['b1', 'b2'])
         self.assertEqual(len(df.columns), 3 + 2)  # number of columns in original DF plus cardinality of catalog_col_names
         self.assertTrue(len(df.take(1)))
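The move from an f-string to `str.format` presumably keeps this module importable on interpreters older than Python 3.6, where f-string syntax is a parse error; the produced CSV text is otherwise identical. As a hedged illustration of the pattern this test exercises, `spark.read.raster` also accepts the catalog as a literal CSV string whose cells are raster paths, as shown in the sketch below (the session name and GeoTIFF URLs are hypothetical placeholders, not project fixtures):

    # Sketch only: `spark` is assumed to be a RasterFrames-enabled
    # SparkSession, and the two scenes' URLs are hypothetical.
    catalog_csv = """metadata,b1,b2
    scene-a,https://example.com/a_B01.tif,https://example.com/a_B02.tif
    scene-b,https://example.com/b_B01.tif,https://example.com/b_B02.tif
    """
    df = spark.read.raster(catalog_csv, ['b1', 'b2'])
    # Expect the 3 original catalog columns plus one tile column per
    # catalog column name: 3 + 2, matching the assertion above.
    assert len(df.columns) == 3 + 2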