@@ -149,15 +149,69 @@ def test_http_read_netcdf_simplecache(data_server):
 # S3
 # based on: https://github.com/dask/s3fs/blob/master/s3fs/tests/test_s3fs.py
 test_bucket_name = "test"
+PORT_S3 = 8001
+endpoint_uri = "http://localhost:%s" % PORT_S3
 test_files = ['RGB.byte.tif', 'example_1.nc']
 
-from s3fs.tests.test_s3fs import s3, s3_base, endpoint_uri
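+# Stand up a local moto-backed S3 endpoint here instead of importing the
+# s3, s3_base, and endpoint_uri fixtures from s3fs's own test suite.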
+@pytest.fixture()
+def s3_base():
+    # writable local S3 system
+    import shlex
+    import subprocess
+
+    proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % PORT_S3),
+                            stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
+                            stdin=subprocess.DEVNULL)
+
+    # poll the endpoint for up to ~5 seconds until the server answers
+    timeout = 5
+    while timeout > 0:
+        try:
+            print("polling for moto server")
+            r = requests.get(endpoint_uri)
+            if r.ok:
+                break
+        except requests.exceptions.ConnectionError:
+            pass
+        timeout -= 0.1
+        time.sleep(0.1)
+    print("server up")
+    yield
+    print("moto done")
+    proc.terminate()
+    proc.wait()
+
+
+@pytest.fixture(scope='function')
+def aws_credentials():
+    """Mocked AWS credentials for moto."""
+    os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
+    os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
+    os.environ['AWS_SECURITY_TOKEN'] = 'testing'
+    os.environ['AWS_SESSION_TOKEN'] = 'testing'
+
+
+@pytest.fixture()
+def s3(s3_base, aws_credentials):
+    '''anonymous-access local s3 bucket for testing'''
+    from botocore.session import Session
+    session = Session()
+    client = session.create_client("s3", endpoint_url=endpoint_uri)
+    client.create_bucket(Bucket=test_bucket_name, ACL="public-read")
+
+    # upload each test file into the mock bucket
+    for file_name in [os.path.join(DIRECTORY, x) for x in test_files]:
+        with open(file_name, 'rb') as f:
+            data = f.read()
+        key = os.path.basename(file_name)
+        client.put_object(Bucket=test_bucket_name, Key=key, Body=data)
+
+    # make sure no cached filesystem instance is being reused
+    s3fs.S3FileSystem.clear_instance_cache()
+    s3 = s3fs.S3FileSystem(anon=True, client_kwargs={"endpoint_url": endpoint_uri})
+    s3.invalidate_cache()
+    yield s3
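+# NOTE: the bucket is re-created and re-populated for every test, so the
+# tests below no longer need to upload the test files themselves.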
 
 
 def test_s3_list_files(s3):
-    for x in test_files:
-        file_name = os.path.join(DIRECTORY, x)
-        s3.put(file_name, f"{test_bucket_name}/{x}")
     s3 = s3fs.S3FileSystem(anon=True, client_kwargs={"endpoint_url": endpoint_uri})
     files = s3.ls(test_bucket_name)
     assert len(files) > 0
@@ -167,9 +221,6 @@ def test_s3_list_files(s3):
 def test_s3_read_rasterio(s3):
     # Lots of GDAL environment variables needed for this to work!
     # https://gdal.org/user/virtual_file_systems.html#vsis3-aws-s3-files
-    for x in test_files:
-        file_name = os.path.join(DIRECTORY, x)
-        s3.put(file_name, f"{test_bucket_name}/{x}")
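+    # (AWS_S3_ENDPOINT takes host:port with no scheme, AWS_VIRTUAL_HOSTING=FALSE
+    # forces path-style addressing, and AWS_NO_SIGN_REQUEST permits anonymous reads.)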
     os.environ['AWS_NO_SIGN_REQUEST'] = 'YES'
     os.environ['AWS_S3_ENDPOINT'] = endpoint_uri.lstrip('http://')
     os.environ['AWS_VIRTUAL_HOSTING'] = 'FALSE'
@@ -187,9 +238,6 @@ def test_s3_read_rasterio(s3):
 
 
 def test_s3_read_netcdf(s3):
-    for x in test_files:
-        file_name = os.path.join(DIRECTORY, x)
-        s3.put(file_name, f"{test_bucket_name}/{x}")
     url = f's3://{test_bucket_name}/example_1.nc'
     s3options = dict(client_kwargs={"endpoint_url": endpoint_uri})
     source = intake.open_netcdf(url,
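For reference, a minimal standalone sketch of the pattern the new fixtures implement: start moto_server, create an anonymously readable bucket, and point s3fs at the local endpoint. The port, bucket name, and API calls mirror the diff above; the script as a whole is illustrative and not part of the commit.

import shlex
import subprocess
import time

import requests
import s3fs
from botocore.session import Session

endpoint_uri = "http://localhost:8001"  # matches PORT_S3 in the diff

# start the mock S3 server; newer moto releases may not take the service argument
proc = subprocess.Popen(shlex.split("moto_server s3 -p 8001"))
try:
    # poll until the server answers, as the s3_base fixture does
    for _ in range(50):
        try:
            if requests.get(endpoint_uri).ok:
                break
        except requests.exceptions.ConnectionError:
            time.sleep(0.1)

    # create a public-read bucket, then list it anonymously through s3fs
    client = Session().create_client("s3", endpoint_url=endpoint_uri)
    client.create_bucket(Bucket="test", ACL="public-read")
    fs = s3fs.S3FileSystem(anon=True, client_kwargs={"endpoint_url": endpoint_uri})
    print(fs.ls("test"))  # empty until objects are uploaded
finally:
    proc.terminate()
    proc.wait()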