@@ -3,7 +3,6 @@ from __future__ import annotations
 from pathlib import Path
 from typing import (
     BinaryIO,
-    Dict,
     List,
     Optional,
     Sequence,
@@ -24,15 +23,15 @@ from .enums import GeoParquetEncoding
 from .types import BboxCovering, GeoParquetEncodingT
 
 class ParquetFile:
-    def __init__(self, path: str, fs: ObjectStore) -> None:
+    def __init__(self, path: str, store: ObjectStore) -> None:
         """
         Construct a new ParquetFile
 
         This will synchronously fetch metadata from the provided path
 
         Args:
             path: a string URL to read from.
-            fs: the file system interface to read from.
+            store: the file system interface to read from.
 
         Returns:
             A new ParquetFile object.
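A minimal call-site sketch of the renamed keyword, assuming `ObjectStore` and `ParquetFile` are re-exported at the package top level (the `geoarrow_rust` import path below is a placeholder, not confirmed by this diff):

```py
# Placeholder import path; adjust to wherever the package exports these names.
from geoarrow_rust import ObjectStore, ParquetFile

# Root plus optional str -> str options, per the removed ObjectStore stub below.
store = ObjectStore("s3://bucket", options={"aws_region": "us-east-1"})

# Metadata is fetched synchronously at construction time.
pf = ParquetFile("path/in/bucket.parquet", store=store)
```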
@@ -133,15 +132,15 @@ class ParquetFile:
133132 """
134133
135134class ParquetDataset :
136- def __init__ (self , paths : Sequence [str ], fs : ObjectStore ) -> None :
135+ def __init__ (self , paths : Sequence [str ], store : ObjectStore ) -> None :
137136 """
138137 Construct a new ParquetDataset
139138
140139 This will synchronously fetch metadata from all listed files.
141140
142141 Args:
143142 paths: a list of string URLs to read from.
144- fs : the file system interface to read from.
143+ store : the file system interface to read from.
145144
146145 Returns:
147146 A new ParquetDataset object.
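The dataset constructor follows the same pattern over several files; a sketch under the same placeholder import path:

```py
from geoarrow_rust import ObjectStore, ParquetDataset  # placeholder import path

store = ObjectStore("s3://bucket")

# Metadata for every listed file is fetched synchronously up front,
# so construction cost grows with the number of paths.
dataset = ParquetDataset(
    ["part-0.parquet", "part-1.parquet"],  # paths relative to the store root
    store=store,
)
```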
@@ -241,9 +240,6 @@ class ParquetWriter:
             table: _description_
         """
 
-class ObjectStore:
-    def __init__(self, root: str, options: Optional[Dict[str, str]] = None) -> None: ...
-
 def read_csv(
     file: str | Path | BinaryIO,
     geometry_column_name: str,
@@ -265,7 +261,7 @@ def read_csv(
 def read_flatgeobuf(
     file: Union[str, Path, BinaryIO],
     *,
-    fs: Optional[ObjectStore] = None,
+    store: Optional[ObjectStore] = None,
     batch_size: int = 65536,
     bbox: Tuple[float, float, float, float] | None = None,
 ) -> Table:
@@ -309,15 +305,15 @@ def read_flatgeobuf(
309305 "aws_secret_access_key": "...",
310306 "aws_region": "..."
311307 }
312- fs = ObjectStore('s3://bucket', options=options)
313- table = read_flatgeobuf("path/in/bucket.fgb", fs=fs )
308+ store = ObjectStore('s3://bucket', options=options)
309+ table = read_flatgeobuf("path/in/bucket.fgb", store=store )
314310 ```
315311
316312 Args:
317313 file: the path to the file or a Python file object in binary read mode.
318314
319315 Other args:
320- fs : an ObjectStore instance for this url. This is required only if the file is at a remote
316+ store : an ObjectStore instance for this url. This is required only if the file is at a remote
321317 location.
322318 batch_size: the number of rows to include in each internal batch of the table.
323319 bbox: A spatial filter for reading rows, of the format (minx, miny, maxx, maxy). If set to
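The docstring example above does not exercise `bbox`; a sketch of a spatially filtered read with the renamed keyword (placeholder import path, illustrative coordinates):

```py
from geoarrow_rust import ObjectStore, read_flatgeobuf  # placeholder import path

store = ObjectStore("s3://bucket")

# bbox is (minx, miny, maxx, maxy); None (the default) reads all rows.
table = read_flatgeobuf(
    "path/in/bucket.fgb",
    store=store,
    batch_size=65536,
    bbox=(-122.6, 37.2, -121.8, 38.0),
)
```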
@@ -330,7 +326,7 @@ def read_flatgeobuf(
 async def read_flatgeobuf_async(
     path: str,
     *,
-    fs: Optional[ObjectStore] = None,
+    store: Optional[ObjectStore] = None,
     batch_size: int = 65536,
     bbox: Tuple[float, float, float, float] | None = None,
 ) -> Table:
@@ -358,17 +354,17 @@ async def read_flatgeobuf_async(
358354 "aws_secret_access_key": "...",
359355 "aws_region": "..."
360356 }
361- fs = ObjectStore('s3://bucket', options=options)
362- table = await read_flatgeobuf_async("path/in/bucket.fgb", fs=fs )
357+ store = ObjectStore('s3://bucket', options=options)
358+ table = await read_flatgeobuf_async("path/in/bucket.fgb", store=store )
363359 ```
364360
365361 Args:
366362 path: the url or relative path to a remote FlatGeobuf file. If an argument is passed for
367- `fs `, this should be a path fragment relative to the root passed to the `ObjectStore`
363+ `store `, this should be a path fragment relative to the root passed to the `ObjectStore`
368364 constructor.
369365
370366 Other args:
371- fs : an ObjectStore instance for this url. This is required for non-HTTP urls.
367+ store : an ObjectStore instance for this url. This is required for non-HTTP urls.
372368 batch_size: the number of rows to include in each internal batch of the table.
373369 bbox: A spatial filter for reading rows, of the format (minx, miny, maxx, maxy). If set to
374370 `None`, no spatial filtering will be performed.
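One payoff of the async variant is overlapping several reads; a sketch with `asyncio.gather` (placeholder import path, illustrative file names):

```py
import asyncio

from geoarrow_rust import ObjectStore, read_flatgeobuf_async  # placeholder import path

async def main():
    store = ObjectStore("s3://bucket")
    # Both paths are fragments relative to the store root; the two
    # fetches run concurrently instead of back to back.
    return await asyncio.gather(
        read_flatgeobuf_async("a.fgb", store=store),
        read_flatgeobuf_async("b.fgb", store=store),
    )

tables = asyncio.run(main())
```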
@@ -409,7 +405,7 @@ def read_geojson_lines(
 def read_parquet(
     path: Union[str, Path, BinaryIO],
     *,
-    fs: Optional[ObjectStore] = None,
+    store: Optional[ObjectStore] = None,
     batch_size: int = 65536,
 ) -> Table:
     """
@@ -443,13 +439,13 @@ def read_parquet(
443439 "aws_secret_access_key": "...",
444440 "aws_region": "..."
445441 }
446- fs = ObjectStore('s3://bucket', options=options)
447- table = read_parquet("path/in/bucket.parquet", fs=fs )
442+ store = ObjectStore('s3://bucket', options=options)
443+ table = read_parquet("path/in/bucket.parquet", store=store )
448444 ```
449445
450446 Args:
451447 path: the path to the file
452- fs : the ObjectStore to read from. Defaults to None.
448+ store : the ObjectStore to read from. Defaults to None.
453449 batch_size: the number of rows to include in each internal batch of the table.
454450
455451 Returns:
@@ -459,7 +455,7 @@ def read_parquet(
 async def read_parquet_async(
     path: Union[str, Path, BinaryIO],
     *,
-    fs: Optional[ObjectStore] = None,
+    store: Optional[ObjectStore] = None,
     batch_size: int = 65536,
 ) -> Table:
     """
@@ -486,13 +482,13 @@ async def read_parquet_async(
486482 "aws_secret_access_key": "...",
487483 "aws_region": "..."
488484 }
489- fs = ObjectStore('s3://bucket', options=options)
490- table = await read_parquet_async("path/in/bucket.parquet", fs=fs )
485+ store = ObjectStore('s3://bucket', options=options)
486+ table = await read_parquet_async("path/in/bucket.parquet", store=store )
491487 ```
492488
493489 Args:
494490 path: the path to the file
495- fs : the ObjectStore to read from. Defaults to None.
491+ store : the ObjectStore to read from. Defaults to None.
496492 batch_size: the number of rows to include in each internal batch of the table.
497493
498494 Returns:
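Taken together, the diff is a pure keyword rename plus removal of the local `ObjectStore` stub; a migration sketch for downstream callers (placeholder import path):

```py
from geoarrow_rust import ObjectStore, read_parquet  # placeholder import path

store = ObjectStore("s3://bucket")

# Before this change:
#   table = read_parquet("path/in/bucket.parquet", fs=store)
# After:
table = read_parquet("path/in/bucket.parquet", store=store)
```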