diff --git a/infra/storage_client/docker-compose.yml b/infra/storage_client/docker-compose.yml
index 78c60a94a..44b31189b 100644
--- a/infra/storage_client/docker-compose.yml
+++ b/infra/storage_client/docker-compose.yml
@@ -38,10 +38,14 @@ services:
       FILE_STORAGE_BACKEND_PATH: /tmp/storage
       ENABLE_IMAGE_TRANSFORMATION: "true"
       IMGPROXY_URL: http://imgproxy:8080
+      DEBUG: "knex:*"
+
     volumes:
       - assets-volume:/tmp/storage
     healthcheck:
       test: ['CMD-SHELL', 'curl -f -LI http://localhost:5000/status']
+      interval: 2s
+
   db:
     build:
       context: ./postgres
@@ -62,6 +66,20 @@ services:
       timeout: 5s
       retries: 5
 
+  dummy_data:
+    build:
+      context: ./postgres
+    depends_on:
+      storage:
+        condition: service_healthy
+    volumes:
+      - ./postgres:/sql
+    command:
+      - psql
+      - "postgresql://postgres:postgres@db:5432/postgres"
+      - -f
+      - /sql/dummy-data.sql
+
   imgproxy:
     image: darthsim/imgproxy
     ports:
@@ -73,4 +91,4 @@ services:
       - IMGPROXY_USE_ETAG=true
      - IMGPROXY_ENABLE_WEBP_DETECTION=true
 volumes:
-  assets-volume:
\ No newline at end of file
+  assets-volume:
diff --git a/infra/storage_client/postgres/Dockerfile b/infra/storage_client/postgres/Dockerfile
index bb2198b83..6364316c9 100644
--- a/infra/storage_client/postgres/Dockerfile
+++ b/infra/storage_client/postgres/Dockerfile
@@ -3,7 +3,6 @@ FROM supabase/postgres:0.13.0
 COPY 00-initial-schema.sql /docker-entrypoint-initdb.d/00-initial-schema.sql
 COPY auth-schema.sql /docker-entrypoint-initdb.d/01-auth-schema.sql
 COPY storage-schema.sql /docker-entrypoint-initdb.d/02-storage-schema.sql
-COPY dummy-data.sql /docker-entrypoint-initdb.d/03-dummy-data.sql
 
 # Build time defaults
 ARG build_POSTGRES_DB=postgres
@@ -17,4 +16,4 @@ ENV POSTGRES_USER=$build_POSTGRES_USER
 ENV POSTGRES_PASSWORD=$build_POSTGRES_PASSWORD
 ENV POSTGRES_PORT=$build_POSTGRES_PORT
 
-EXPOSE 5432
\ No newline at end of file
+EXPOSE 5432
diff --git a/infra/storage_client/postgres/storage-schema.sql b/infra/storage_client/postgres/storage-schema.sql
index c879c6b62..08142a13f 100644
--- a/infra/storage_client/postgres/storage-schema.sql
+++ b/infra/storage_client/postgres/storage-schema.sql
@@ -28,7 +28,6 @@ CREATE TABLE "storage"."objects" (
     "last_accessed_at" timestamptz DEFAULT now(),
     "metadata" jsonb,
     CONSTRAINT "objects_bucketId_fkey" FOREIGN KEY ("bucket_id") REFERENCES "storage"."buckets"("id"),
-    CONSTRAINT "objects_owner_fkey" FOREIGN KEY ("owner") REFERENCES "auth"."users"("id"),
     PRIMARY KEY ("id")
 );
 CREATE UNIQUE INDEX "bucketid_objname" ON "storage"."objects" USING BTREE ("bucket_id","name");
@@ -85,27 +84,24 @@ CREATE OR REPLACE FUNCTION storage.search(prefix text, bucketname text, limits i
 )
 LANGUAGE plpgsql
 AS $function$
-DECLARE
-_bucketId text;
 BEGIN
-    select buckets."id" from buckets where buckets.name=bucketname limit 1 into _bucketId;
-    return query
+    return query
         with files_folders as (
             select ((string_to_array(objects.name, '/'))[levels]) as folder
             from objects
             where objects.name ilike prefix || '%'
-            and bucket_id = _bucketId
+            and bucket_id = bucketname
             GROUP by folder
             limit limits
             offset offsets
-        )
-        select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders
+        )
+        select files_folders.folder as name, objects.id, objects.updated_at, objects.created_at, objects.last_accessed_at, objects.metadata from files_folders
         left join objects
-        on prefix || files_folders.folder = objects.name
-        where objects.id is null or objects.bucket_id=_bucketId;
+        on prefix || files_folders.folder = objects.name and objects.bucket_id=bucketname;
 END
 $function$;
 
 GRANT ALL PRIVILEGES ON SCHEMA storage TO postgres;
 GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA storage TO postgres;
-GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;
\ No newline at end of file
+GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA storage TO postgres;
+
diff --git a/infra/storage_client/storage/Dockerfile b/infra/storage_client/storage/Dockerfile
index c14b3d9e0..666b7ad52 100644
--- a/infra/storage_client/storage/Dockerfile
+++ b/infra/storage_client/storage/Dockerfile
@@ -1,3 +1,3 @@
-FROM supabase/storage-api:v0.35.1
+FROM supabase/storage-api:v1.8.2
 
-RUN apk add curl --no-cache
\ No newline at end of file
+RUN apk add curl --no-cache
diff --git a/packages/storage_client/lib/src/storage_file_api.dart b/packages/storage_client/lib/src/storage_file_api.dart
index 39ae64339..848cd98b2 100644
--- a/packages/storage_client/lib/src/storage_file_api.dart
+++ b/packages/storage_client/lib/src/storage_file_api.dart
@@ -276,7 +276,13 @@ class StorageFileApi {
   /// example `folder/image.png`.
   /// [toPath] is the new file path, including the new file name. For example
   /// `folder/image-new.png`.
-  Future move(String fromPath, String toPath) async {
+  ///
+  /// When moving to a different bucket, you have to specify the [destinationBucket].
+  Future move(
+    String fromPath,
+    String toPath, {
+    String? destinationBucket,
+  }) async {
     final options = FetchOptions(headers: headers);
     final response = await _storageFetch.post(
       '$url/object/move',
@@ -284,6 +290,7 @@ class StorageFileApi {
         'bucketId': bucketId,
         'sourceKey': fromPath,
         'destinationKey': toPath,
+        if (destinationBucket != null) 'destinationBucket': destinationBucket,
       },
       options: options,
     );
@@ -297,7 +304,13 @@ class StorageFileApi {
   ///
   /// [toPath] is the new file path, including the new file name. For example
   /// `folder/image-copy.png`.
-  Future copy(String fromPath, String toPath) async {
+  ///
+  /// When copying to a different bucket, you have to specify the [destinationBucket].
+  Future copy(
+    String fromPath,
+    String toPath, {
+    String? destinationBucket,
+  }) async {
     final options = FetchOptions(headers: headers);
     final response = await _storageFetch.post(
       '$url/object/copy',
@@ -305,6 +318,7 @@ class StorageFileApi {
         'bucketId': bucketId,
         'sourceKey': fromPath,
         'destinationKey': toPath,
+        if (destinationBucket != null) 'destinationBucket': destinationBucket,
       },
       options: options,
     );
diff --git a/packages/storage_client/test/client_test.dart b/packages/storage_client/test/client_test.dart
index 0560faccc..4cca4bd35 100644
--- a/packages/storage_client/test/client_test.dart
+++ b/packages/storage_client/test/client_test.dart
@@ -388,5 +388,51 @@ void main() {
 
       await storage.from(newBucketName).copy(uploadPath, "$uploadPath 2");
     });
+
+    test('copy to different bucket', () async {
+      final storage = SupabaseStorageClient(
+          storageUrl, {'Authorization': 'Bearer $storageKey'});
+
+      try {
+        await storage.from('bucket2').download(uploadPath);
+        fail('File that does not exist was found');
+      } on StorageException catch (error) {
+        expect(error.error, 'not_found');
+      }
+      await storage
+          .from(newBucketName)
+          .copy(uploadPath, uploadPath, destinationBucket: 'bucket2');
+      try {
+        await storage.from('bucket2').download(uploadPath);
+      } catch (error) {
+        fail('File that was copied was not found');
+      }
+    });
+
+    test('move to different bucket', () async {
+      final storage = SupabaseStorageClient(
+          storageUrl, {'Authorization': 'Bearer $storageKey'});
+
+      try {
+        await storage.from('bucket2').download('$uploadPath 3');
+        fail('File that does not exist was found');
+      } on StorageException catch (error) {
+        expect(error.error, 'not_found');
+      }
+      await storage
+          .from(newBucketName)
+          .move(uploadPath, '$uploadPath 3', destinationBucket: 'bucket2');
+      try {
+        await storage.from('bucket2').download('$uploadPath 3');
+      } catch (error) {
+        fail('File that was moved was not found');
+      }
+      try {
+        await storage.from(newBucketName).download(uploadPath);
+        fail('File that was moved was found');
+      } on StorageException catch (error) {
+        expect(error.error, 'not_found');
+      }
+    });
   });
 }
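
Below is a minimal usage sketch of the destinationBucket parameter introduced in storage_file_api.dart above; it is illustrative only and not part of the patch. The storage URL, authorization header, bucket names, and object paths are hypothetical placeholders, and error handling is omitted.

import 'package:storage_client/storage_client.dart';

Future<void> main() async {
  // Hypothetical endpoint and key; substitute real project values.
  final storage = SupabaseStorageClient(
    'http://localhost:5000/storage/v1',
    {'Authorization': 'Bearer service-role-key'},
  );

  // Copy within the same bucket: existing behaviour, unchanged.
  await storage.from('avatars').copy('folder/image.png', 'folder/image-copy.png');

  // Copy into another bucket by passing the new named parameter.
  await storage
      .from('avatars')
      .copy('folder/image.png', 'folder/image.png', destinationBucket: 'backups');

  // Move works the same way; the source object is removed from 'avatars' afterwards.
  await storage
      .from('avatars')
      .move('folder/image.png', 'archive/image.png', destinationBucket: 'backups');
}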