diff --git a/.github/workflows/gradle-release.yml b/.github/workflows/gradle-release.yml index 2d18621..7592481 100644 --- a/.github/workflows/gradle-release.yml +++ b/.github/workflows/gradle-release.yml @@ -16,7 +16,11 @@ on: workflow_dispatch jobs: build: runs-on: ubuntu-latest - + services: + localstack: + image: localstack/localstack:latest + ports: + - 4566:4566 steps: - uses: actions/checkout@v3 with: @@ -34,4 +38,5 @@ jobs: GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} - run: bash ./release.sh \ No newline at end of file + run: ./bin/release.sh + shell: bash \ No newline at end of file diff --git a/.github/workflows/pull-request-verify.yml b/.github/workflows/pull-request-verify.yml new file mode 100644 index 0000000..f05797e --- /dev/null +++ b/.github/workflows/pull-request-verify.yml @@ -0,0 +1,34 @@ +# Copyright 2022 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
+ +name: Verify Pull Request + +on: pull_request +jobs: + build: + runs-on: ubuntu-latest + services: + localstack: + image: localstack/localstack:latest + ports: + - 4566:4566 + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up JDK + uses: actions/setup-java@v2 + with: + java-version: 11 + distribution: 'adopt' + + - name: Verify build + run: ./gradlew clean check --stacktrace + shell: bash \ No newline at end of file diff --git a/release.sh b/bin/release.sh old mode 100644 new mode 100755 similarity index 52% rename from release.sh rename to bin/release.sh index 3738467..0ded874 --- a/release.sh +++ b/bin/release.sh @@ -1,4 +1,12 @@ -#!/usr/bin/env sh +# Copyright 2022 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
set -e @@ -19,4 +27,4 @@ export ORG_GRADLE_PROJECT_ossrhPassword="${SONATYPE_PASSWORD}" -Psigning.gnupg.useLegacyGpg=true \ -Psigning.gnupg.keyName="${GPG_KEY_ID}" \ -Psigning.gnupg.passphrase="${GPG_PASSPHRASE}" \ - clean publish -x integrationTest --stacktrace \ No newline at end of file + clean publish --stacktrace \ No newline at end of file diff --git a/build.gradle b/build.gradle index bb98118..42abcf0 100644 --- a/build.gradle +++ b/build.gradle @@ -38,9 +38,6 @@ dependencies { testImplementation "junit:junit:4.12" testImplementation "org.mockito:mockito-core:2.23.4" testImplementation "com.tngtech.java:junit-dataprovider:1.13.1" - testImplementation "org.testcontainers:localstack:1.12.5" - testImplementation group: 'org.testcontainers', name: 'testcontainers', version: "1.9.1" - testImplementation group: 'cloud.localstack', name: 'localstack-utils', version: '0.1.15' testImplementation group: 'org.apache.hadoop', name: 'hadoop-aws', version: '3.1.1' testImplementation "org.apache.hadoop:hadoop-common:3.1.2" testImplementation "org.apache.hadoop:hadoop-common:3.1.2:tests" @@ -163,8 +160,8 @@ publishing { maven { url = "https://oss.sonatype.org/service/local/staging/deploy/maven2/" credentials { - username project.property("ossrhUsername").toString() - password project.property("ossrhPassword").toString() + username project.hasProperty("ossrhUsername") ? project.property("ossrhUsername").toString() : "" + password project.hasProperty("ossrhPassword") ? 
project.property("ossrhPassword").toString() : "" } } } @@ -208,6 +205,7 @@ signing { afterEvaluate { javadoc.dependsOn compileJava - jar.dependsOn test /* , integrationTest */ // remove running of integrationTests for now - shadowJar.dependsOn test, sourcesJar, javadocJar /* , integrationTest */ // remove running of integrationTests for now + check.dependsOn test, integrationTest + jar.dependsOn test, integrationTest + shadowJar.dependsOn test, sourcesJar, javadocJar, integrationTest } \ No newline at end of file diff --git a/docs/Contributing.md b/docs/Contributing.md index 3898547..d8599fd 100644 --- a/docs/Contributing.md +++ b/docs/Contributing.md @@ -43,4 +43,9 @@ Read [GitHub's pull request documentation](https://help.github.com/articles/abou ## Integration tests The integration tests use [localstack](https://github.com/localstack/localstack) to emulate cloud services such as S3 and DynamoDB. -To run integration tests locally you need to have [docker](https://docs.docker.com/docker-for-mac/install/) installed. \ No newline at end of file +Localstack runs as a [docker](https://www.docker.com/) container. + +Steps to run integration tests locally: +- Install docker [desktop](https://docs.docker.com/get-docker/). +- Install and start localstack following these [steps](https://docs.localstack.cloud/get-started/#localstack-cli). +- Run integration tests either from your IDE or through the gradle CLI `./gradlew clean integrationTest`. 
\ No newline at end of file diff --git a/src/integrationTest/java/com/adobe/s3fs/DynamoDBMetadataStoreIntegrationTest.java b/src/integrationTest/java/com/adobe/s3fs/DynamoDBMetadataStoreIntegrationTest.java index b404080..0c31c3b 100644 --- a/src/integrationTest/java/com/adobe/s3fs/DynamoDBMetadataStoreIntegrationTest.java +++ b/src/integrationTest/java/com/adobe/s3fs/DynamoDBMetadataStoreIntegrationTest.java @@ -12,10 +12,6 @@ package com.adobe.s3fs; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import com.adobe.s3fs.common.configuration.FileSystemConfiguration; import com.adobe.s3fs.common.configuration.HadoopKeyValueConfiguration; import com.adobe.s3fs.common.context.FileSystemContext; @@ -28,52 +24,28 @@ import com.adobe.s3fs.utils.DynamoTable; import com.adobe.s3fs.utils.ITUtils; import com.adobe.s3fs.utils.InMemoryMetadataOperationLog; - import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; -import com.google.common.base.Functions; -import com.google.common.collect.*; - +import com.google.common.collect.FluentIterable; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.common.collect.Streams; import junit.framework.AssertionFailedError; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.ReflectionUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.Test; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; - -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Optional; -import java.util.Random; -import java.util.Set; -import java.util.UUID; +import org.junit.*; 
+ +import java.util.*; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; -public class DynamoDBMetadataStoreIntegrationTest { - - @ClassRule - public static Network network = Network.newNetwork(); +import static org.junit.Assert.*; - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); +public class DynamoDBMetadataStoreIntegrationTest { @Rule - public DynamoTable dynamoTable = new DynamoTable(ITUtils.amazonDynamoDB(localStackContainer)); + public DynamoTable dynamoTable = new DynamoTable(ITUtils.amazonDynamoDB()); private DynamoDBMetadataStore metadataStore; @@ -88,7 +60,7 @@ public class DynamoDBMetadataStoreIntegrationTest { @BeforeClass public static void beforeClass() { - dynamoDB = ITUtils.amazonDynamoDB(localStackContainer); + dynamoDB = ITUtils.amazonDynamoDB(); } @Before @@ -99,7 +71,7 @@ public void setup() { ITUtils.mapBucketToTable(configuration, "bucket", dynamoTable.getTable()); - ITUtils.configureDynamoAccess(localStackContainer, configuration, "bucket"); + ITUtils.configureDynamoAccess(configuration, "bucket"); ITUtils.configureAsyncOperations(configuration, "bucket", "ctx"); diff --git a/src/integrationTest/java/com/adobe/s3fs/FileSystemIntegrationTest.java b/src/integrationTest/java/com/adobe/s3fs/FileSystemIntegrationTest.java index 36e30c3..ffbed46 100644 --- a/src/integrationTest/java/com/adobe/s3fs/FileSystemIntegrationTest.java +++ b/src/integrationTest/java/com/adobe/s3fs/FileSystemIntegrationTest.java @@ -12,87 +12,57 @@ package com.adobe.s3fs; -import static com.adobe.s3fs.utils.FileSystemStateChecker.checkFileSystemState; -import static com.adobe.s3fs.utils.FileSystemStateChecker.expectedDirectory; -import static com.adobe.s3fs.utils.FileSystemStateChecker.expectedFile; -import 
static com.adobe.s3fs.utils.OperationLogStateChecker.checkOperationLogState; -import static com.adobe.s3fs.utils.stream.StreamUtils.uncheckedRunnable; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import com.adobe.s3fs.common.runtime.FileSystemRuntimeFactory; import com.adobe.s3fs.filesystem.HadoopFileSystemAdapter; import com.adobe.s3fs.utils.DynamoTable; import com.adobe.s3fs.utils.ExpectedFSObject; import com.adobe.s3fs.utils.ITUtils; import com.adobe.s3fs.utils.S3Bucket; - import com.amazonaws.services.s3.AmazonS3; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; - import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.log4j.BasicConfigurator; +import org.apache.hadoop.fs.*; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.junit.Before; -import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; import java.io.ByteArrayOutputStream; import java.io.FileNotFoundException; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; +import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.stream.Collectors; import java.util.stream.Stream; +import static 
com.adobe.s3fs.utils.FileSystemStateChecker.*; +import static com.adobe.s3fs.utils.OperationLogStateChecker.checkOperationLogState; +import static com.adobe.s3fs.utils.stream.StreamUtils.uncheckedRunnable; +import static org.junit.Assert.*; + public class FileSystemIntegrationTest { static { LogManager.getRootLogger().setLevel(Level.INFO); } - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Rule - public DynamoTable dynamoTable1 = new DynamoTable(ITUtils.amazonDynamoDB(localStackContainer)); + public DynamoTable dynamoTable1 = new DynamoTable(ITUtils.amazonDynamoDB()); @Rule - public S3Bucket bucket1 = new S3Bucket(ITUtils.amazonS3(localStackContainer)); + public S3Bucket bucket1 = new S3Bucket(ITUtils.amazonS3()); @Rule - public S3Bucket operationLogBucket = new S3Bucket(ITUtils.amazonS3(localStackContainer)); + public S3Bucket operationLogBucket = new S3Bucket(ITUtils.amazonS3()); private Configuration configuration; @@ -102,20 +72,20 @@ public class FileSystemIntegrationTest { @Before public void setup() throws IOException { - s3 = ITUtils.amazonS3(localStackContainer); + s3 = ITUtils.amazonS3(); configuration = new Configuration(false); configuration.setClass("fs.s3.impl", HadoopFileSystemAdapter.class, FileSystem.class); configuration.setBoolean("fs.s3.impl.disable.cache", true); ITUtils.configureAsyncOperations(configuration, bucket1.getBucket(), "ctx"); - ITUtils.configureDynamoAccess(localStackContainer, configuration, bucket1.getBucket()); + ITUtils.configureDynamoAccess(configuration, bucket1.getBucket()); ITUtils.mapBucketToTable(configuration, bucket1.getBucket(), dynamoTable1.getTable()); 
ITUtils.configureS3OperationLog(configuration, bucket1.getBucket(), operationLogBucket.getBucket()); - ITUtils.configureS3OperationLogAccess(localStackContainer, configuration, bucket1.getBucket()); + ITUtils.configureS3OperationLogAccess(configuration, bucket1.getBucket()); - ITUtils.configureS3AAsUnderlyingFileSystem(localStackContainer, configuration, bucket1.getBucket(), temporaryFolder.getRoot().toString()); + ITUtils.configureS3AAsUnderlyingFileSystem(configuration, bucket1.getBucket(), temporaryFolder.getRoot().toString()); ITUtils.configureSuffixCount(configuration, bucket1.getBucket(), 10); diff --git a/src/integrationTest/java/com/adobe/s3fs/S3PrefixListerTest.java b/src/integrationTest/java/com/adobe/s3fs/S3PrefixListerTest.java index 98d03c4..242ae4b 100644 --- a/src/integrationTest/java/com/adobe/s3fs/S3PrefixListerTest.java +++ b/src/integrationTest/java/com/adobe/s3fs/S3PrefixListerTest.java @@ -32,11 +32,12 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.log4j.Level; import org.apache.log4j.LogManager; -import org.junit.*; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.mockito.Mockito; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; import java.io.*; import java.nio.charset.StandardCharsets; @@ -49,22 +50,13 @@ public class S3PrefixListerTest { - @ClassRule public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = - new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - static { LogManager.getRootLogger().setLevel(Level.INFO); } @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); - @Rule public S3Bucket bucket1 = new S3Bucket(ITUtils.amazonS3(localStackContainer)); + @Rule public S3Bucket 
bucket1 = new S3Bucket(ITUtils.amazonS3()); private S3PrefixLister s3PrefixLister; @@ -119,7 +111,7 @@ private void verifyList(List expected, List actual) { @Before public void setup() throws IOException { - s3 = ITUtils.amazonS3(localStackContainer); + s3 = ITUtils.amazonS3(); s3Spy = Mockito.spy(s3); @@ -130,7 +122,7 @@ public void setup() throws IOException { configuration.setBoolean("fs.s3a.multiobjectdelete.enable", false); ITUtils.configureS3AAsUnderlyingFileSystem( - localStackContainer, configuration, bucket1.getBucket(), temporaryFolder.getRoot().toString()); + configuration, bucket1.getBucket(), temporaryFolder.getRoot().toString()); fileSystem = pathInBucket(bucket1.getBucket(), "").getFileSystem(configuration); diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/ContractUtils.java b/src/integrationTest/java/com/adobe/s3fs/contract/ContractUtils.java index 5ca6056..c9a9f0a 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/ContractUtils.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/ContractUtils.java @@ -18,30 +18,28 @@ import com.adobe.s3fs.utils.ITUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.testcontainers.containers.localstack.LocalStackContainer; public final class ContractUtils { private ContractUtils() {} public static void configureFullyFunctionalFileSystem(Configuration configuration, - LocalStackContainer localStackContainer, String tmpFolder) { - ITUtils.createBucketIfNotExists(ITUtils.amazonS3(localStackContainer), S3KFileSystemContract.BUCKET); - ITUtils.createBucketIfNotExists(ITUtils.amazonS3(localStackContainer), S3KFileSystemContract.OPLOG_BUCKET); - ITUtils.createMetaTableIfNotExists(ITUtils.amazonDynamoDB(localStackContainer), S3KFileSystemContract.DYNAMO_TABLE); + ITUtils.createBucketIfNotExists(ITUtils.amazonS3(), S3KFileSystemContract.BUCKET); + ITUtils.createBucketIfNotExists(ITUtils.amazonS3(), S3KFileSystemContract.OPLOG_BUCKET); + 
ITUtils.createMetaTableIfNotExists(ITUtils.amazonDynamoDB(), S3KFileSystemContract.DYNAMO_TABLE); configuration.setClass("fs.s3k.impl", HadoopFileSystemAdapter.class, FileSystem.class); configuration.setBoolean("fs.s3k.impl.disable.cache", true); ITUtils.configureAsyncOperations(configuration, S3KFileSystemContract.BUCKET, "ctx"); - ITUtils.configureDynamoAccess(localStackContainer, configuration, S3KFileSystemContract.BUCKET); + ITUtils.configureDynamoAccess(configuration, S3KFileSystemContract.BUCKET); ITUtils.mapBucketToTable(configuration, S3KFileSystemContract.BUCKET, S3KFileSystemContract.DYNAMO_TABLE); ITUtils.configureS3OperationLog(configuration, S3KFileSystemContract.BUCKET, S3KFileSystemContract.OPLOG_BUCKET); - ITUtils.configureS3OperationLogAccess(localStackContainer, configuration, S3KFileSystemContract.BUCKET); + ITUtils.configureS3OperationLogAccess(configuration, S3KFileSystemContract.BUCKET); - ITUtils.configureS3AAsUnderlyingFileSystem(localStackContainer, configuration, S3KFileSystemContract.BUCKET, tmpFolder); + ITUtils.configureS3AAsUnderlyingFileSystem(configuration, S3KFileSystemContract.BUCKET, tmpFolder); ITUtils.configureSuffixCount(configuration, S3KFileSystemContract.BUCKET, 10); diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractCreate.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractCreate.java index ed924ce..2d8bffe 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractCreate.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractCreate.java @@ -18,23 +18,11 @@ import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.junit.ClassRule; import org.junit.Test; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; 
public class TestS3KContractCreate extends AbstractContractCreateTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -52,7 +40,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractDelete.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractDelete.java index e363e1b..811ef8c 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractDelete.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractDelete.java @@ -17,22 +17,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractDelete extends AbstractContractDeleteTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public 
TemporaryFolder temporaryFolder; @Override @@ -50,7 +38,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractGetFileStatus.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractGetFileStatus.java index 882488f..1d8e9bc 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractGetFileStatus.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractGetFileStatus.java @@ -17,22 +17,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractGetFileStatus extends AbstractContractGetFileStatusTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -50,7 +38,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + 
ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractMkdir.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractMkdir.java index 475cc16..151c020 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractMkdir.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractMkdir.java @@ -18,22 +18,10 @@ import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractMkdir extends AbstractContractMkdirTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -51,7 +39,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractOpen.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractOpen.java index e1fff4c..881750d 100644 --- 
a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractOpen.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractOpen.java @@ -17,22 +17,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractOpenTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractOpen extends AbstractContractOpenTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -50,7 +38,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRename.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRename.java index fa275ff..07355c1 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRename.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRename.java @@ -17,20 +17,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.ClassRule; import 
org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractRename extends AbstractContractRenameTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); public TemporaryFolder temporaryFolder; @@ -49,7 +38,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRootDirectory.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRootDirectory.java index 0956799..7428d06 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRootDirectory.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractRootDirectory.java @@ -18,22 +18,10 @@ import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractRootDirectory extends AbstractContractRootDirectoryTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer 
localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -51,7 +39,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } diff --git a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractSeek.java b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractSeek.java index 832715b..5adc74d 100644 --- a/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractSeek.java +++ b/src/integrationTest/java/com/adobe/s3fs/contract/TestS3KContractSeek.java @@ -17,22 +17,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractSeekTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.localstack.LocalStackContainer; public class TestS3KContractSeek extends AbstractContractSeekTest { - @ClassRule - public static Network network = Network.newNetwork(); - - @ClassRule - public static LocalStackContainer localStackContainer = new LocalStackContainer() - .withNetwork(network) - .withServices(LocalStackContainer.Service.DYNAMODB, LocalStackContainer.Service.S3) - .withNetworkAliases("localstack"); - public TemporaryFolder temporaryFolder; @Override @@ -50,7 +38,7 @@ public void teardown() throws Exception { @Override protected AbstractFSContract createContract(Configuration configuration) { - 
ContractUtils.configureFullyFunctionalFileSystem(configuration, localStackContainer, temporaryFolder.getRoot().toString()); + ContractUtils.configureFullyFunctionalFileSystem(configuration, temporaryFolder.getRoot().toString()); return new S3KFileSystemContract(configuration); } } diff --git a/src/integrationTest/java/com/adobe/s3fs/utils/ITUtils.java b/src/integrationTest/java/com/adobe/s3fs/utils/ITUtils.java index 07d5faf..db442db 100644 --- a/src/integrationTest/java/com/adobe/s3fs/utils/ITUtils.java +++ b/src/integrationTest/java/com/adobe/s3fs/utils/ITUtils.java @@ -15,6 +15,7 @@ import com.adobe.s3fs.metastore.internal.dynamodb.storage.DynamoDBStorageConfiguration; import com.adobe.s3fs.operationlog.S3MetadataOperationLogFactory; import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; @@ -27,15 +28,21 @@ import com.amazonaws.services.s3.model.S3ObjectSummary; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.junit.rules.TemporaryFolder; -import org.testcontainers.containers.localstack.LocalStackContainer; import java.util.ArrayList; import java.util.List; public final class ITUtils { + // default port for all AWS services exposed by localstack is 4566 + public static final AwsClientBuilder.EndpointConfiguration S3_ENDPOINT = + new AwsClientBuilder.EndpointConfiguration("http://localhost.localstack.cloud:4566/", "us-east-1"); + public static final AwsClientBuilder.EndpointConfiguration DYNAMODB_ENDPOINT = + new AwsClientBuilder.EndpointConfiguration("http://localhost.localstack.cloud:4566/", "us-east-1"); + public static final AWSCredentialsProvider DEFAULT_AWS_CREDENTIALS_PROVIDER = new 
BasicAWSCredentialsProvider("dummy", "dummy"); + public static void createMetaTableIfNotExists(AmazonDynamoDB dynamoDB, String tableName) { try { DescribeTableResult ignored = dynamoDB.describeTable(tableName); @@ -68,35 +75,32 @@ public static void setFileSystemContext(String context) { System.setProperty("fs.s3k.metastore.context.id", context); } - public static void configureDynamoAccess(LocalStackContainer container, Configuration configuration, String bucket) { - AwsClientBuilder.EndpointConfiguration endpointConfiguration = container.getEndpointConfiguration(LocalStackContainer.Service.DYNAMODB); + public static void configureDynamoAccess(Configuration configuration, String bucket) { configuration.set(DynamoDBStorageConfiguration.AWS_ENDPOINT + "." + bucket, - endpointConfiguration.getServiceEndpoint()); + DYNAMODB_ENDPOINT.getServiceEndpoint()); configuration.set(DynamoDBStorageConfiguration.AWS_SIGNING_REGION + "." + bucket, - endpointConfiguration.getSigningRegion()); + DYNAMODB_ENDPOINT.getSigningRegion()); - AWSCredentials awsCredentials = container.getDefaultCredentialsProvider().getCredentials(); configuration.set(DynamoDBStorageConfiguration.AWS_ACCESS_KEY_ID + "." + bucket, - awsCredentials.getAWSAccessKeyId()); + DEFAULT_AWS_CREDENTIALS_PROVIDER.getCredentials().getAWSAccessKeyId()); configuration.set(DynamoDBStorageConfiguration.AWS_SECRET_ACCESS_KEY + "." + bucket, - awsCredentials.getAWSSecretKey()); + DEFAULT_AWS_CREDENTIALS_PROVIDER.getCredentials().getAWSSecretKey()); } public static void configureS3OperationLog(Configuration configuration, String dataBucket, String operationLogBucket) { configuration.set(S3MetadataOperationLogFactory.OPERATION_LOG_BUCKET + "." 
+ dataBucket, operationLogBucket); } - public static void configureS3OperationLogAccess(LocalStackContainer container, Configuration configuration, String bucket) { - AwsClientBuilder.EndpointConfiguration endpointConfiguration = container.getEndpointConfiguration(LocalStackContainer.Service.S3); - configuration.set(S3MetadataOperationLogFactory.AWS_ENDPOINT + "." + bucket, endpointConfiguration.getServiceEndpoint()); - configuration.set(S3MetadataOperationLogFactory.AWS_SIGNING_REGION + "." + bucket, endpointConfiguration.getSigningRegion()); + public static void configureS3OperationLogAccess(Configuration configuration, String bucket) { + configuration.set(S3MetadataOperationLogFactory.AWS_ENDPOINT + "." + bucket, S3_ENDPOINT.getServiceEndpoint()); + configuration.set(S3MetadataOperationLogFactory.AWS_SIGNING_REGION + "." + bucket, S3_ENDPOINT.getSigningRegion()); - AWSCredentials awsCredentials = container.getDefaultCredentialsProvider().getCredentials(); + AWSCredentials awsCredentials = DEFAULT_AWS_CREDENTIALS_PROVIDER.getCredentials(); configuration.set(S3MetadataOperationLogFactory.AWS_ACCESS_KEY_ID + "." + bucket, awsCredentials.getAWSAccessKeyId()); configuration.set(S3MetadataOperationLogFactory.AWS_SECRET_ACCESS_KEY + "." 
+ bucket, awsCredentials.getAWSSecretKey()); } - public static void configureS3AAsUnderlyingFileSystem(LocalStackContainer container, Configuration configuration, String bucket, + public static void configureS3AAsUnderlyingFileSystem(Configuration configuration, String bucket, String tmpPath) { System.setProperty(SkipMd5CheckStrategy.DISABLE_GET_OBJECT_MD5_VALIDATION_PROPERTY, "true"); System.setProperty(SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY, "true"); @@ -106,10 +110,9 @@ public static void configureS3AAsUnderlyingFileSystem(LocalStackContainer contai configuration.setClass("fs.s3a.impl", S3AFileSystem.class, FileSystem.class); configuration.set("fs.s3a.buffer.dir", tmpPath); - configuration.set("fs.s3a.access.key", container.getDefaultCredentialsProvider().getCredentials().getAWSAccessKeyId()); - configuration.set("fs.s3a.secret.key", container.getDefaultCredentialsProvider().getCredentials().getAWSSecretKey()); - configuration.set("fs.s3a.endpoint", - container.getEndpointConfiguration(LocalStackContainer.Service.S3).getServiceEndpoint()); + configuration.set("fs.s3a.access.key", DEFAULT_AWS_CREDENTIALS_PROVIDER.getCredentials().getAWSAccessKeyId()); + configuration.set("fs.s3a.secret.key", DEFAULT_AWS_CREDENTIALS_PROVIDER.getCredentials().getAWSSecretKey()); + configuration.set("fs.s3a.endpoint", S3_ENDPOINT.getServiceEndpoint()); } public static void mapBucketToTable(Configuration configuration, String bucket, String table) { @@ -138,17 +141,17 @@ public static void configureAsyncOperations(Configuration configuration, String configuration.setBoolean("fs.s3k.metastore.operations.async." + bucket + "." 
+ context, true); } - public static AmazonS3 amazonS3(LocalStackContainer localStackContainer) { + public static AmazonS3 amazonS3() { return AmazonS3ClientBuilder.standard() - .withEndpointConfiguration(localStackContainer.getEndpointConfiguration(LocalStackContainer.Service.S3)) - .withCredentials(localStackContainer.getDefaultCredentialsProvider()) + .withEndpointConfiguration(S3_ENDPOINT) + .withCredentials(DEFAULT_AWS_CREDENTIALS_PROVIDER) .build(); } - public static AmazonDynamoDB amazonDynamoDB(LocalStackContainer localStackContainer) { + public static AmazonDynamoDB amazonDynamoDB() { return AmazonDynamoDBClientBuilder.standard() - .withEndpointConfiguration(localStackContainer.getEndpointConfiguration(LocalStackContainer.Service.DYNAMODB)) - .withCredentials(localStackContainer.getDefaultCredentialsProvider()) + .withEndpointConfiguration(DYNAMODB_ENDPOINT) + .withCredentials(DEFAULT_AWS_CREDENTIALS_PROVIDER) .build(); } }