@@ -149,69 +149,15 @@ def test_http_read_netcdf_simplecache(data_server):
 # S3
 #based on: https://github.com/dask/s3fs/blob/master/s3fs/tests/test_s3fs.py
 test_bucket_name = "test"
-PORT_S3 = 8001
-endpoint_uri = "http://localhost:%s" % PORT_S3
 test_files = ['RGB.byte.tif', 'example_1.nc']
 
-@pytest.fixture()
-def s3_base():
-    # writable local S3 system
-    import shlex
-    import subprocess
-
-    proc = subprocess.Popen(shlex.split("moto_server s3 -p %s" % PORT_S3),
-                            stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stdin=subprocess.DEVNULL)
-
-    timeout = 5
-    while timeout > 0:
-        try:
-            print("polling for moto server")
-
-            r = requests.get(endpoint_uri)
-            if r.ok:
-                break
-        except:
-            pass
-        timeout -= 0.1
-        time.sleep(0.1)
-    print("server up")
-    yield
-    print("moto done")
-    proc.terminate()
-    proc.wait()
-
-
-@pytest.fixture(scope='function')
-def aws_credentials():
-    """Mocked AWS Credentials for moto."""
-    os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
-    os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
-    os.environ['AWS_SECURITY_TOKEN'] = 'testing'
-    os.environ['AWS_SESSION_TOKEN'] = 'testing'
-
-
-@pytest.fixture()
-def s3(s3_base, aws_credentials):
-    ''' anonymous access local s3 bucket for testing '''
-    from botocore.session import Session
-    session = Session()
-    client = session.create_client("s3", endpoint_url=endpoint_uri)
-    client.create_bucket(Bucket=test_bucket_name, ACL="public-read")
-
-    for file_name in [os.path.join(DIRECTORY,x) for x in test_files]:
-        with open(file_name, 'rb') as f:
-            data = f.read()
-        key = os.path.basename(file_name)
-        client.put_object(Bucket=test_bucket_name, Key=key, Body=data)
-
-    # Make sure cache not being used
-    s3fs.S3FileSystem.clear_instance_cache()
-    s3 = s3fs.S3FileSystem(anon=True, client_kwargs={"endpoint_url": endpoint_uri})
-    s3.invalidate_cache()
-    yield
+from s3fs.tests.test_s3fs import s3, s3_base, endpoint_uri
 
 
 def test_s3_list_files(s3):
+    for x in test_files:
+        file_name = os.path.join(DIRECTORY, x)
+        s3.put(file_name, f"{test_bucket_name}/{x}")
     s3 = s3fs.S3FileSystem(anon=True, client_kwargs={"endpoint_url": endpoint_uri})
     files = s3.ls(test_bucket_name)
     assert len(files) > 0
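The hunk above replaces roughly sixty lines of local moto/botocore plumbing by importing pytest fixtures straight from s3fs's own test suite. Importing `s3_base` alongside `s3` matters: pytest resolves fixtures by name in the test module's namespace, and `s3` depends on `s3_base`. A minimal sketch of the same pattern, assuming (as the `s3.put(...)` calls above imply) that the imported `s3` fixture yields a ready `S3FileSystem` pointed at a local mock endpoint; the file name below is purely illustrative:

```python
# Sketch of the fixture-reuse pattern (illustrative names only).
# Importing fixtures by name makes them collectable in this module;
# `s3` transitively requires `s3_base`, so both imports are needed.
from s3fs.tests.test_s3fs import s3, s3_base, endpoint_uri  # noqa: F401

test_bucket_name = "test"

def test_uploaded_file_is_listed(s3):
    # Write bytes directly into the mock bucket, then list it back.
    s3.pipe(f"{test_bucket_name}/hello.txt", b"hi")
    assert f"{test_bucket_name}/hello.txt" in s3.ls(test_bucket_name)
```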
@@ -221,6 +167,9 @@ def test_s3_list_files(s3):
 def test_s3_read_rasterio(s3):
     # Lots of GDAL Environment variables needed for this to work!
     # https://gdal.org/user/virtual_file_systems.html#vsis3-aws-s3-files
+    for x in test_files:
+        file_name = os.path.join(DIRECTORY, x)
+        s3.put(file_name, f"{test_bucket_name}/{x}")
     os.environ['AWS_NO_SIGN_REQUEST'] = 'YES'
     os.environ['AWS_S3_ENDPOINT'] = endpoint_uri.lstrip('http://')
     os.environ['AWS_VIRTUAL_HOSTING'] = 'FALSE'
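For readers unfamiliar with the `AWS_*` settings above: they are GDAL configuration options that point `/vsis3/` reads at the mock server instead of real AWS. An equivalent, more self-contained way to scope them is `rasterio.Env`, which confines the options to one block instead of mutating `os.environ`. A sketch under the assumption that the moto endpoint is plain HTTP on localhost (in the tests the host/port would be derived from the imported `endpoint_uri`):

```python
import rasterio

# Scope GDAL config options to this block rather than the process env.
# The endpoint below is an assumption, not the tests' actual port.
with rasterio.Env(AWS_NO_SIGN_REQUEST="YES",
                  AWS_S3_ENDPOINT="localhost:5555",
                  AWS_VIRTUAL_HOSTING="FALSE",
                  AWS_HTTPS="NO"):
    with rasterio.open("/vsis3/test/RGB.byte.tif") as src:
        assert src.count == 3  # RGB.byte.tif is a three-band raster
```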
@@ -238,6 +187,9 @@ def test_s3_read_rasterio(s3):
 
 
 def test_s3_read_netcdf(s3):
+    for x in test_files:
+        file_name = os.path.join(DIRECTORY, x)
+        s3.put(file_name, f"{test_bucket_name}/{x}")
     url = f's3://{test_bucket_name}/example_1.nc'
     s3options = dict(client_kwargs={"endpoint_url": endpoint_uri})
     source = intake.open_netcdf(url,
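The last hunk ends mid-call, as diff context windows do; for orientation, intake-xarray's `open_netcdf` forwards `storage_options` to fsspec/s3fs, which is how the mocked endpoint gets honored. A sketch of the shape of such a read (the `anon=True` flag is an assumption, not shown in the diff):

```python
import intake

url = f"s3://{test_bucket_name}/example_1.nc"
source = intake.open_netcdf(
    url,
    storage_options=dict(anon=True,
                         client_kwargs={"endpoint_url": endpoint_uri}),
)
ds = source.to_dask()       # lazily opens the file as an xarray Dataset
print(list(ds.data_vars))   # a real test would assert on the variables
```

Since the same three-line upload loop now opens all three tests, hoisting it into a small local fixture that wraps the imported `s3` would be a natural follow-up to this commit.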