diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java index 9e40a187992..07964343429 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureBlobStoreBackend.java @@ -18,9 +18,9 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java index dce503f9797..d15f7505c63 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java @@ -24,8 +24,8 @@ import java.util.Map; import java.util.Properties; -import org.apache.jackrabbit.core.data.DataStore; -import 
org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreService; import org.osgi.framework.Constants; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java index c5bfa6ca904..25227478113 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProvider.java @@ -30,13 +30,12 @@ import com.azure.storage.blob.specialized.BlockBlobClient; import com.azure.storage.common.policy.RequestRetryOptions; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.net.URISyntaxException; import java.security.InvalidKeyException; import java.time.OffsetDateTime; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java index 0f18882dcc0..9a53f45ac75 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java +++ 
b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java @@ -35,9 +35,9 @@ import com.azure.storage.common.policy.RequestRetryOptions; import com.microsoft.azure.storage.RetryPolicy; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.guava.common.cache.Cache; import org.apache.jackrabbit.guava.common.cache.CacheBuilder; import org.apache.jackrabbit.oak.commons.PropertiesUtil; @@ -90,7 +90,6 @@ import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadToken; import static java.nio.file.StandardOpenOption.DELETE_ON_CLOSE; -import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_BUFFERED_STREAM_THRESHOLD; import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_DEFAULT_CONCURRENT_REQUEST_COUNT; import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_LAST_MODIFIED_KEY; import static org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants.AZURE_BLOB_MAX_ALLOWABLE_UPLOAD_URIS; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java index bb3b15d0066..6020e17f267 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java @@ -22,9 +22,9 
@@ import java.net.URI; import java.util.Properties; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobStoreBackendV8; import org.apache.jackrabbit.oak.commons.properties.SystemPropertySupplier; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java index 6ba260defac..1aa1edcacda 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java @@ -36,7 +36,7 @@ import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java index 9f717e8d9e1..b2fbc67ff66 100644 --- 
a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8.java @@ -34,7 +34,7 @@ import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.jetbrains.annotations.NotNull; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java index 11904fb3a55..95acd908dd9 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8.java @@ -86,9 +86,9 @@ import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AbstractAzureBlobStoreBackend; import 
org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils; diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java index a21c1a1a313..c0b5e92cc04 100644 --- a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java +++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8.java @@ -29,7 +29,7 @@ import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.jetbrains.annotations.NotNull; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProviderTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProviderTest.java index 4f7a35ebe89..e5d40a164a3 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProviderTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobContainerProviderTest.java @@ -26,9 +26,7 @@ import com.microsoft.aad.msal4j.MsalServiceException; import org.apache.commons.lang3.reflect.MethodUtils; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.junit.After; -import org.junit.Before; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import 
org.junit.ClassRule; import org.junit.Test; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java index 0b54c7870bf..610fbc84bc9 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackendTest.java @@ -27,9 +27,9 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.reflect.MethodUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.guava.common.cache.Cache; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderCDNTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderCDNTest.java index 1a81f9f2782..3f5c54cc102 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderCDNTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderCDNTest.java @@ -33,7 +33,7 @@ import java.util.Properties; import org.apache.commons.lang3.StringUtils; -import 
org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.apache.jackrabbit.oak.api.blob.BlobUploadOptions; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java index 66f1aff3848..46b9379948b 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderIT.java @@ -25,10 +25,10 @@ import javax.net.ssl.HttpsURLConnection; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.AbstractDataRecordAccessProviderIT; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.junit.BeforeClass; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java 
b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java index 8ae50086fb5..1e5243778b8 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataRecordAccessProviderTest.java @@ -33,10 +33,10 @@ import javax.net.ssl.HttpsURLConnection; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.AbstractDataRecordAccessProviderTest; @@ -46,7 +46,6 @@ import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException; import org.apache.jackrabbit.oak.spi.blob.BlobOptions; import org.jetbrains.annotations.NotNull; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java index b18b2650ccb..2f42b527d62 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java +++ 
b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java @@ -26,9 +26,9 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.NullOutputStream; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8.AzureBlobStoreBackendV8; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java index 7bfae732dac..640fb1f394c 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java @@ -36,7 +36,7 @@ import com.azure.storage.blob.BlobContainerClient; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.MapUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java 
b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java index c40eaa76e5a..47c704d842d 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java @@ -20,7 +20,7 @@ import static org.junit.Assume.assumeTrue; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.commons.junit.LogCustomizer; import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreTest; import org.junit.After; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java index 915a56f344a..c03b869a4e0 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java @@ -16,8 +16,8 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; -import org.apache.jackrabbit.core.data.CachingDataStore; -import org.apache.jackrabbit.core.data.LocalCache; +import org.apache.jackrabbit.oak.spi.blob.data.CachingDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.LocalCache; import org.junit.Before; /** diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java index 8da9def7e86..01efe76648b 100644 --- 
a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java @@ -16,10 +16,11 @@ */ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage; +import org.apache.jackrabbit.oak.spi.blob.data.CachingDataStore; import org.junit.Before; /** - * Test {@link org.apache.jackrabbit.core.data.CachingDataStore} with AzureBlobStoreBackend + * Test {@link CachingDataStore} with AzureBlobStoreBackend * and local cache Off. * It requires to pass azure config file via system property or system properties by prefixing with 'ds.'. * See details @ {@link AzureDataStoreUtils}. diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java index 00d528e3d3f..b33d14988e0 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/UtilsTest.java @@ -18,7 +18,7 @@ import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.common.policy.RequestRetryOptions; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8AuthenticationTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8AuthenticationTest.java index d4a54f04d5b..4c2a244e8c9 100644 --- 
a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8AuthenticationTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8AuthenticationTest.java @@ -19,7 +19,7 @@ package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.v8; import org.apache.commons.lang3.reflect.MethodUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.junit.After; import org.junit.Test; import org.mockito.MockedStatic; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ContainerOperationsTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ContainerOperationsTest.java index 7196d8d42ce..567d8adddc5 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ContainerOperationsTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ContainerOperationsTest.java @@ -20,16 +20,14 @@ import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.junit.After; import org.junit.ClassRule; import org.junit.Test; -import org.mockito.MockedStatic; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; diff 
--git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ErrorConditionsTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ErrorConditionsTest.java index 5bd94c7c40f..821f55332ee 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ErrorConditionsTest.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8ErrorConditionsTest.java @@ -23,13 +23,12 @@ import com.microsoft.azure.storage.blob.CloudBlockBlob; import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders; import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import java.lang.reflect.Field; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java index bb5969d60e4..0249636de0b 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobContainerProviderV8Test.java @@ -24,8 +24,8 @@ import com.microsoft.azure.storage.blob.SharedAccessBlobHeaders; import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; import 
com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.jetbrains.annotations.NotNull; diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java index 49cfff277db..6e56a6c225b 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/AzureBlobStoreBackendV8Test.java @@ -23,8 +23,9 @@ import com.microsoft.azure.storage.blob.SharedAccessBlobPermissions; import com.microsoft.azure.storage.blob.SharedAccessBlobPolicy; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzuriteDockerRule; import org.jetbrains.annotations.NotNull; @@ -503,7 +504,7 @@ public void testReadNonExistentBlob() throws Exception { backend.init(); try { - backend.read(new org.apache.jackrabbit.core.data.DataIdentifier("nonexistent")); + backend.read(new DataIdentifier("nonexistent")); fail("Expected DataStoreException when 
reading non-existent blob"); } catch (DataStoreException e) { assertTrue("Should contain missing blob error", e.getMessage().contains("Trying to read missing blob")); @@ -519,7 +520,7 @@ public void testGetRecordNonExistent() throws Exception { backend.init(); try { - backend.getRecord(new org.apache.jackrabbit.core.data.DataIdentifier("nonexistent")); + backend.getRecord(new DataIdentifier("nonexistent")); fail("Expected DataStoreException when getting non-existent record"); } catch (DataStoreException e) { assertTrue("Should contain retrieve blob error", e.getMessage().contains("Cannot retrieve blob")); @@ -535,7 +536,7 @@ public void testDeleteNonExistentRecord() throws Exception { backend.init(); // Should not throw exception when deleting non-existent record - backend.deleteRecord(new org.apache.jackrabbit.core.data.DataIdentifier("nonexistent")); + backend.deleteRecord(new DataIdentifier("nonexistent")); // No exception expected assertTrue("Delete should not throw exception for non-existent record", true); } @@ -686,7 +687,7 @@ public void testWriteWithNullFile() throws Exception { backend.init(); try { - backend.write(new org.apache.jackrabbit.core.data.DataIdentifier("test"), null); + backend.write(new DataIdentifier("test"), null); fail("Expected NullPointerException for null file"); } catch (NullPointerException e) { assertEquals("file must not be null", e.getMessage()); @@ -781,8 +782,8 @@ public void testGetAllIdentifiers() throws Exception { writer2.write("test content 2"); } - org.apache.jackrabbit.core.data.DataIdentifier id1 = new org.apache.jackrabbit.core.data.DataIdentifier("test1"); - org.apache.jackrabbit.core.data.DataIdentifier id2 = new org.apache.jackrabbit.core.data.DataIdentifier("test2"); + DataIdentifier id1 = new DataIdentifier("test1"); + DataIdentifier id2 = new DataIdentifier("test2"); try { // Write test records @@ -790,12 +791,12 @@ public void testGetAllIdentifiers() throws Exception { backend.write(id2, tempFile2); // Test 
getAllIdentifiers - java.util.Iterator identifiers = backend.getAllIdentifiers(); + java.util.Iterator identifiers = backend.getAllIdentifiers(); assertNotNull("Identifiers iterator should not be null", identifiers); java.util.Set foundIds = new java.util.HashSet<>(); while (identifiers.hasNext()) { - org.apache.jackrabbit.core.data.DataIdentifier id = identifiers.next(); + DataIdentifier id = identifiers.next(); foundIds.add(id.toString()); } @@ -830,8 +831,8 @@ public void testGetAllRecords() throws Exception { writer2.write(content2); } - org.apache.jackrabbit.core.data.DataIdentifier id1 = new org.apache.jackrabbit.core.data.DataIdentifier("test1"); - org.apache.jackrabbit.core.data.DataIdentifier id2 = new org.apache.jackrabbit.core.data.DataIdentifier("test2"); + DataIdentifier id1 = new DataIdentifier("test1"); + DataIdentifier id2 = new DataIdentifier("test2"); try { // Write test records @@ -884,7 +885,7 @@ public void testWriteAndReadActualFile() throws Exception { writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("writetest"); + DataIdentifier identifier = new DataIdentifier("writetest"); try { // Write the file @@ -926,7 +927,7 @@ public void testWriteExistingFileWithSameLength() throws Exception { writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("existingtest"); + DataIdentifier identifier = new DataIdentifier("existingtest"); try { // Write the file first time @@ -972,7 +973,7 @@ public void testWriteExistingFileWithDifferentLength() throws Exception { writer.write(content2); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("lengthtest"); + DataIdentifier identifier = new DataIdentifier("lengthtest"); try { // Write the first file @@ -1003,7 +1004,7 @@ public void testExistsMethod() throws Exception { 
backend.setProperties(getConfigurationWithConnectionString()); backend.init(); - org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("existstest"); + DataIdentifier identifier = new DataIdentifier("existstest"); // Test non-existent file assertFalse("Non-existent file should return false", backend.exists(identifier)); @@ -1047,7 +1048,7 @@ public void testAzureBlobStoreDataRecordFunctionality() throws Exception { writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = new org.apache.jackrabbit.core.data.DataIdentifier("datarecordtest"); + DataIdentifier identifier = new DataIdentifier("datarecordtest"); try { // Write the file @@ -1285,7 +1286,7 @@ public void testIteratorWithEmptyContainer() throws Exception { try { // Test getAllIdentifiers with empty container - java.util.Iterator identifiers = backend.getAllIdentifiers(); + java.util.Iterator identifiers = backend.getAllIdentifiers(); assertNotNull("Identifiers iterator should not be null", identifiers); assertFalse("Empty container should have no identifiers", identifiers.hasNext()); @@ -1415,8 +1416,8 @@ public void testKeyNameUtilityMethods() throws Exception { } // Test with identifier that will test key name transformation - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("abcd1234567890abcdef"); + DataIdentifier identifier = + new DataIdentifier("abcd1234567890abcdef"); try { // Write and read to test key name transformation @@ -1461,8 +1462,8 @@ public void testLargeFileHandling() throws Exception { writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("largefile"); + DataIdentifier identifier = + new DataIdentifier("largefile"); try { // Write the large file @@ -1540,8 +1541,8 @@ public void testDirectAccessMethodsWithDisabledExpiry() throws Exception { 
writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("directaccess"); + DataIdentifier identifier = + new DataIdentifier("directaccess"); try { // Write the file @@ -1590,8 +1591,8 @@ public void testDirectAccessMethodsWithEnabledExpiry() throws Exception { writer.write(testContent); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("directaccess"); + DataIdentifier identifier = + new DataIdentifier("directaccess"); try { // Write the file @@ -1651,7 +1652,7 @@ public void testDirectAccessWithNullParameters() throws Exception { // Test createHttpDownloadURI with null options try { backend.createHttpDownloadURI( - new org.apache.jackrabbit.core.data.DataIdentifier("test"), null); + new DataIdentifier("test"), null); fail("Expected NullPointerException for null options"); } catch (NullPointerException e) { assertEquals("downloadOptions must not be null", e.getMessage()); @@ -1874,8 +1875,8 @@ public void testReadWithContextClassLoaderHandling() throws Exception { writer.write("test content for context class loader"); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("contextclassloadertest"); + DataIdentifier identifier = + new DataIdentifier("contextclassloadertest"); try { // Set a custom context class loader @@ -1915,8 +1916,8 @@ public void testWriteWithBufferedStreamThreshold() throws Exception { writer.write("small content"); // Less than AZURE_BLOB_BUFFERED_STREAM_THRESHOLD } - org.apache.jackrabbit.core.data.DataIdentifier smallId = - new org.apache.jackrabbit.core.data.DataIdentifier("smallfile"); + DataIdentifier smallId = + new DataIdentifier("smallfile"); try { backend.write(smallId, smallFile); @@ -1931,8 +1932,8 @@ public void testWriteWithBufferedStreamThreshold() throws Exception { } } - 
org.apache.jackrabbit.core.data.DataIdentifier largeId = - new org.apache.jackrabbit.core.data.DataIdentifier("largefile"); + DataIdentifier largeId = + new DataIdentifier("largefile"); backend.write(largeId, largeFile); assertTrue("Large file should be written successfully", backend.exists(largeId)); @@ -1954,8 +1955,8 @@ public void testExistsWithContextClassLoaderHandling() throws Exception { backend.setProperties(getConfigurationWithConnectionString()); backend.init(); - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("existstest"); + DataIdentifier identifier = + new DataIdentifier("existstest"); // Set a custom context class loader ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader(); @@ -1989,8 +1990,8 @@ public void testDeleteRecordWithContextClassLoaderHandling() throws Exception { writer.write("content to delete"); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("deletetest"); + DataIdentifier identifier = + new DataIdentifier("deletetest"); try { backend.write(identifier, tempFile); @@ -2131,8 +2132,8 @@ public void testCreateHttpDownloadURIWithCacheDisabled() throws Exception { writer.write("download test content"); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("downloadtest"); + DataIdentifier identifier = + new DataIdentifier("downloadtest"); try { backend.write(identifier, tempFile); @@ -2161,8 +2162,8 @@ public void testCreateHttpDownloadURIWithVerifyExistsEnabled() throws Exception backend.setProperties(props); backend.init(); - org.apache.jackrabbit.core.data.DataIdentifier nonExistentId = - new org.apache.jackrabbit.core.data.DataIdentifier("nonexistent"); + DataIdentifier nonExistentId = + new DataIdentifier("nonexistent"); org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions 
options = org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions.DEFAULT; @@ -2286,8 +2287,8 @@ public void testCompleteHttpUploadWithExistingRecord() throws Exception { writer.write("complete test content"); } - org.apache.jackrabbit.core.data.DataIdentifier identifier = - new org.apache.jackrabbit.core.data.DataIdentifier("completetest"); + DataIdentifier identifier = + new DataIdentifier("completetest"); try { backend.write(identifier, tempFile); diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java index 1da7efe7a1c..2b2c46f832d 100644 --- a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java +++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/v8/UtilsV8Test.java @@ -26,7 +26,7 @@ import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.junit.After; import org.junit.Test; diff --git a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/AbstractS3DataStoreService.java b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/AbstractS3DataStoreService.java index db28127cf20..d5a2984e47f 100644 --- a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/AbstractS3DataStoreService.java +++ b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/AbstractS3DataStoreService.java @@ -24,8 +24,8 @@ import java.util.Map; import java.util.Properties; -import 
org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreService; import org.osgi.framework.Constants; diff --git a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java index d79297f11b5..3e2b8a69c09 100644 --- a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java +++ b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java @@ -47,9 +47,9 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.collections.MapUtils; diff --git a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStore.java b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStore.java index c30c01bc014..3af99c9a3b2 100644 --- a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStore.java +++ b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStore.java @@ -19,9 +19,9 @@ import java.net.URI; import 
java.util.Properties; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUploadException; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderIT.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderIT.java index 8d7808da59d..fa36a39a29c 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderIT.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderIT.java @@ -24,10 +24,10 @@ import javax.net.ssl.HttpsURLConnection; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.AbstractDataRecordAccessProviderIT; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.junit.BeforeClass; diff --git 
a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderTest.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderTest.java index eba404c3a0f..cf59f796458 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderTest.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataRecordAccessProviderTest.java @@ -36,10 +36,10 @@ import javax.net.ssl.HttpsURLConnection; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.AbstractDataRecordAccessProviderTest; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStoreUtils.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStoreUtils.java index 1d6ce896ecf..5b695ebe84e 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStoreUtils.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3DataStoreUtils.java @@ -32,7 +32,7 @@ import javax.net.ssl.HttpsURLConnection; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; 
import org.apache.jackrabbit.oak.commons.collections.MapUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DSWithSmallCache.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DSWithSmallCache.java index f571c05925f..227e7019bc4 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DSWithSmallCache.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DSWithSmallCache.java @@ -16,8 +16,8 @@ */ package org.apache.jackrabbit.oak.blob.cloud.s3; -import org.apache.jackrabbit.core.data.CachingDataStore; -import org.apache.jackrabbit.core.data.LocalCache; +import org.apache.jackrabbit.oak.spi.blob.data.CachingDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.LocalCache; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DataStore.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DataStore.java index f983cb367d8..884d0b2ce78 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DataStore.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DataStore.java @@ -48,8 +48,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.time.DateUtils; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.junit.After; import org.junit.Before; import org.junit.Rule; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3Ds.java 
b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3Ds.java index b64dfd7bf34..00e79a73c26 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3Ds.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3Ds.java @@ -29,9 +29,9 @@ import javax.jcr.RepositoryException; import org.apache.commons.lang3.time.DateUtils; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreTest; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.ConfigurableDataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordAccessProvider; diff --git a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DsCacheOff.java b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DsCacheOff.java index a7fe1bd9d15..fb62f1ee8ee 100644 --- a/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DsCacheOff.java +++ b/oak-blob-cloud/src/test/java/org/apache/jackrabbit/oak/blob/cloud/s3/TestS3DsCacheOff.java @@ -16,12 +16,13 @@ */ package org.apache.jackrabbit.oak.blob.cloud.s3; +import org.apache.jackrabbit.oak.spi.blob.data.CachingDataStore; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Test {@link org.apache.jackrabbit.core.data.CachingDataStore} with S3Backend + * Test {@link CachingDataStore} with S3Backend * and local cache Off. * It requires to pass aws config file via system property or system properties by prefixing with 'ds.'. * See details @ {@link S3DataStoreUtils}. 
diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/AbstractSharedCachingDataStore.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/AbstractSharedCachingDataStore.java index 569f45cd714..718ea36cf09 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/AbstractSharedCachingDataStore.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/AbstractSharedCachingDataStore.java @@ -41,11 +41,11 @@ import org.apache.commons.io.IOUtils; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.time.Stopwatch; -import org.apache.jackrabbit.core.data.AbstractDataStore; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; +import org.apache.jackrabbit.oak.spi.blob.data.AbstractDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.MultiDataStoreAware; import org.apache.jackrabbit.guava.common.cache.CacheLoader; import org.apache.jackrabbit.oak.plugins.blob.datastore.TypedDataStore; import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreCacheStats.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreCacheStats.java index fe93c709b7e..4dd43313777 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreCacheStats.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreCacheStats.java @@ -38,7 +38,7 @@ 
import org.osgi.service.component.annotations.Component; import org.osgi.service.component.annotations.Deactivate; import org.osgi.service.component.annotations.Reference; -import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/DataStoreCacheUpgradeUtils.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/DataStoreCacheUpgradeUtils.java index 3c913b4c8d8..fcc258bc960 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/DataStoreCacheUpgradeUtils.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/DataStoreCacheUpgradeUtils.java @@ -18,10 +18,7 @@ import java.io.File; import java.io.FileInputStream; -import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -31,6 +28,8 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.serialization.ValidatingObjectInputStream; import org.apache.jackrabbit.oak.commons.io.FileTreeTraverser; +import org.apache.jackrabbit.oak.spi.blob.data.AsyncUploadCache; +import org.apache.jackrabbit.oak.spi.blob.data.CachingDataStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +37,7 @@ /** * Utility methods to upgrade Old DataStore cache - * {@link org.apache.jackrabbit.core.data.CachingDataStore}. + * {@link CachingDataStore}. 
*/ public class DataStoreCacheUpgradeUtils { private static final Logger LOG = LoggerFactory.getLogger(DataStoreCacheUpgradeUtils.class); @@ -48,7 +47,7 @@ public class DataStoreCacheUpgradeUtils { static final String DOWNLOAD_DIR = FileCache.DOWNLOAD_DIR; /** - * De-serialize the pending uploads map from {@link org.apache.jackrabbit.core.data.AsyncUploadCache}. + * De-serialize the pending uploads map from {@link AsyncUploadCache}. * * @param homeDir the directory where the serialized file is maintained * @return the de-serialized map @@ -175,7 +174,7 @@ public static void movePendingUploadsToStaging(File homeDir, File path, boolean } /** - * Upgrades the {@link org.apache.jackrabbit.core.data.CachingDataStore}. + * Upgrades the {@link CachingDataStore}. * * @param homeDir the repository home directory * @param path the root of the datastore diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ExtendedBlobStatsCollector.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ExtendedBlobStatsCollector.java index 55e2713e56f..7c74b2ac29b 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ExtendedBlobStatsCollector.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/ExtendedBlobStatsCollector.java @@ -22,9 +22,10 @@ import java.io.File; import java.util.concurrent.TimeUnit; -import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.apache.jackrabbit.oak.spi.blob.stats.BlobStatsCollector; /** @@ -214,7 +215,7 @@ public void getDownloadURIFailed() { } /** - * Called when a {@link org.apache.jackrabbit.core.data.DataRecord} is retrieved via + * Called when a {@link DataRecord} is retrieved via * a call to {@link 
SharedDataStore#getRecordForId(DataIdentifier)}. * * @param timeTaken time taken to perform the operation diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java index 7015baab072..866c5ecbba6 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/MarkSweepGarbageCollector.java @@ -61,8 +61,8 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.LineIterator; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStore.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStore.java index 8b584ea1b80..abe828ebd4d 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStore.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStore.java @@ -21,9 +21,9 @@ import java.util.Iterator; import java.util.List; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import 
org.jetbrains.annotations.Nullable; /** diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCache.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCache.java index 4210c590714..d63c5a33f55 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCache.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCache.java @@ -46,7 +46,7 @@ import org.apache.commons.lang3.concurrent.BasicThreadFactory; import org.apache.jackrabbit.guava.common.cache.Weigher; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.StringUtils; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreService.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreService.java index 0fc566a6f40..434842d42b5 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreService.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreService.java @@ -30,8 +30,8 @@ import javax.jcr.RepositoryException; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.blob.BlobAccessProvider; import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean; import org.apache.jackrabbit.oak.commons.PropertiesUtil; diff --git 
a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTracker.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTracker.java index dd7bd3e80b4..67d35c845aa 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTracker.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTracker.java @@ -36,7 +36,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.Predicate; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.apache.jackrabbit.oak.commons.collections.IterableUtils; import org.apache.jackrabbit.oak.commons.collections.ListUtils; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java index e63bedce40b..0ac27cafe0a 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStore.java @@ -42,11 +42,11 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import 
org.apache.jackrabbit.oak.spi.blob.data.MultiDataStoreAware; import org.apache.jackrabbit.guava.common.cache.LoadingCache; import org.apache.jackrabbit.guava.common.cache.Weigher; import org.apache.jackrabbit.oak.api.Blob; @@ -80,7 +80,7 @@ /** * BlobStore wrapper for DataStore. Wraps Jackrabbit 2 DataStore and expose them as BlobStores * It also handles inlining binaries if there size is smaller than - * {@link org.apache.jackrabbit.core.data.DataStore#getMinRecordLength()} + * {@link DataStore#getMinRecordLength()} */ public class DataStoreBlobStore implements DataStore, BlobStore, GarbageCollectableBlobStore, BlobTrackingStore, TypedDataStore, diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DbDataStoreService.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DbDataStoreService.java deleted file mode 100644 index c0217f55dab..00000000000 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DbDataStoreService.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.blob.datastore; - -import java.util.Map; - -import org.osgi.service.component.annotations.Component; -import org.osgi.service.component.annotations.ConfigurationPolicy; -import org.osgi.service.component.annotations.Reference; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.db.DbDataStore; -import org.apache.jackrabbit.core.util.db.ConnectionFactory; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.osgi.service.component.ComponentContext; - -@Component(configurationPolicy = ConfigurationPolicy.REQUIRE, name = DbDataStoreService.NAME) -public class DbDataStoreService extends AbstractDataStoreService{ - public static final String NAME = "org.apache.jackrabbit.oak.plugins.blob.datastore.DbDataStore"; - - @Reference - private ConnectionFactory connectionFactory; - - @Reference - private StatisticsProvider statisticsProvider; - - protected StatisticsProvider getStatisticsProvider(){ - return statisticsProvider; - } - - protected void setStatisticsProvider(StatisticsProvider statisticsProvider) { - this.statisticsProvider = statisticsProvider; - } - - @Override - protected DataStore createDataStore(ComponentContext context, Map config) { - DbDataStore dataStore = new DbDataStore(); - dataStore.setConnectionFactory(connectionFactory); - return dataStore; - } -} - diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackend.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackend.java index 28bf7c61232..8b09202b170 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackend.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackend.java @@ -33,10 +33,10 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.io.filefilter.FileFilterUtils; 
-import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.LazyFileInputStream; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.AutoClosingLazyFileInputStream; import org.apache.jackrabbit.oak.commons.io.FileTreeTraverser; import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord; import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend; @@ -92,7 +92,7 @@ public void init() throws DataStoreException { public InputStream read(DataIdentifier identifier) throws DataStoreException { File file = getFile(identifier, fsPathDir); try { - return new LazyFileInputStream(file); + return new AutoClosingLazyFileInputStream(file); } catch (IOException e) { throw new DataStoreException("Error opening input stream of " + file.getAbsolutePath(), e); @@ -454,7 +454,7 @@ public FSBackendDataRecord(AbstractSharedBackend backend, @Override public InputStream getStream() throws DataStoreException { try { - return new LazyFileInputStream(file); + return new AutoClosingLazyFileInputStream(file); } catch (FileNotFoundException e) { LOG.error("Error in returning stream", e); throw new DataStoreException(e); diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FileDataStoreService.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FileDataStoreService.java index b4344715bef..a8eae61deb6 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FileDataStoreService.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FileDataStoreService.java @@ -23,7 +23,7 @@ import 
org.osgi.service.component.annotations.ConfigurationPolicy; import org.osgi.service.component.annotations.Reference; import java.util.Objects; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore; import org.apache.jackrabbit.oak.stats.StatisticsProvider; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecord.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecord.java index 2a696dc6e8e..b5c56925365 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecord.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecord.java @@ -22,9 +22,9 @@ import java.io.InputStream; import java.util.Arrays; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java index 6a9023ea4b3..b7af7ccfd68 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java @@ -36,11 +36,11 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import 
org.apache.commons.io.filefilter.FileFilterUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataRecord; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.commons.io.FileTreeTraverser; import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore; import org.slf4j.Logger; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java index bb19906d24c..409cc014bbd 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreUtils.java @@ -25,7 +25,7 @@ import java.util.stream.Collectors; import org.apache.commons.collections4.FluentIterable; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.apache.jackrabbit.oak.commons.collections.SetUtils; import org.apache.jackrabbit.oak.commons.collections.StreamUtils; import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/TypedDataStore.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/TypedDataStore.java index 2460111e91e..60178d129b9 100644 --- 
a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/TypedDataStore.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/TypedDataStore.java @@ -20,12 +20,13 @@ import java.io.InputStream; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.spi.blob.BlobOptions; /** - * Interface to provide ability to the {@link org.apache.jackrabbit.core.data.DataStore} + * Interface to provide ability to the {@link DataStore} * to add records with {@link BlobOptions}. */ public interface TypedDataStore { diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordAccessProvider.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordAccessProvider.java index 70f0376277f..7e26022d391 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordAccessProvider.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordAccessProvider.java @@ -20,9 +20,9 @@ import java.net.URI; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; diff --git 
a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordUploadToken.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordUploadToken.java index f67d2c76b56..f9f72bd702a 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordUploadToken.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/DataRecordUploadToken.java @@ -29,6 +29,7 @@ import org.apache.commons.codec.binary.Base64; import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; @@ -55,7 +56,7 @@ public class DataRecordUploadToken { * that call {@link #getEncodedToken(byte[])} after creating the token. * * @param blobId The blob ID, usually a {@link - * org.apache.jackrabbit.core.data.DataIdentifier}. + * DataIdentifier}. * @param uploadId A free-form string used to identify this upload. This * may be provided by the service provider; if not a free-form * upload ID generated by the implementation will suffice. May be diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/package-info.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/package-info.java index 340897416fb..5671ddaa60f 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/package-info.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/package-info.java @@ -21,7 +21,7 @@ * Package related to direct upload/download of data records. 
*/ @Internal -@Version("1.0.1") +@Version("2.0.0") package org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess; import org.apache.jackrabbit.oak.commons.annotations.Internal; diff --git a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/serializer/FSBlobSerializer.java b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/serializer/FSBlobSerializer.java index a46cefe868e..d9cad3411cb 100644 --- a/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/serializer/FSBlobSerializer.java +++ b/oak-blob-plugins/src/main/java/org/apache/jackrabbit/oak/plugins/blob/serializer/FSBlobSerializer.java @@ -24,8 +24,8 @@ import java.io.IOException; import java.io.InputStream; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.json.BlobDeserializer; import org.apache.jackrabbit.oak.json.BlobSerializer; diff --git a/oak-blob-plugins/src/main/resources/org/apache/jackrabbit/oak/plugins/blob/blobstore.properties b/oak-blob-plugins/src/main/resources/org/apache/jackrabbit/oak/plugins/blob/blobstore.properties index 160823b74ba..4ad45cebd27 100644 --- a/oak-blob-plugins/src/main/resources/org/apache/jackrabbit/oak/plugins/blob/blobstore.properties +++ b/oak-blob-plugins/src/main/resources/org/apache/jackrabbit/oak/plugins/blob/blobstore.properties @@ -23,7 +23,7 @@ cloudContainer=oakblobstore cloudProvider=aws-s3 # Common for all data store -dataStoreProvider=org.apache.jackrabbit.core.data.FileDataStore +dataStoreProvider=org.apache.jackrabbit.oak.spi.blob.data.FileDataStore streamCacheSize=256 path=./repository/datastore @@ -54,4 +54,4 @@ sleepBetweenRecords=100 delayedDelete=false delayedDeleteSleep=86400 
primary=org.apache.jackrabbit.core.data.db.DbDataStore -archive=org.apache.jackrabbit.core.data.FileDataStore \ No newline at end of file +archive=org.apache.jackrabbit.oak.spi.blob.data.FileDataStore \ No newline at end of file diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/AbstractDataStoreCacheTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/AbstractDataStoreCacheTest.java index 6b4c720b877..83f868986cb 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/AbstractDataStoreCacheTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/AbstractDataStoreCacheTest.java @@ -52,9 +52,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.guava.common.cache.CacheLoader; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/CachingDataStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/CachingDataStoreTest.java index eb046262aba..75216604fe3 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/CachingDataStoreTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/CachingDataStoreTest.java @@ -35,9 +35,9 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.filefilter.FileFilterUtils; import 
org.apache.commons.io.output.NullOutputStream; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.apache.jackrabbit.oak.commons.internal.concurrent.ExecutorUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreStatsTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreStatsTest.java index 45e1e712625..6e720b20535 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreStatsTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ConsolidatedDataStoreStatsTest.java @@ -35,8 +35,8 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.NullOutputStream; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.commons.PathUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreMarkSweepGarbageCollectorTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreMarkSweepGarbageCollectorTest.java index e36a5695799..a560b5e1452 100644 --- 
a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreMarkSweepGarbageCollectorTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreMarkSweepGarbageCollectorTest.java @@ -18,9 +18,9 @@ */ package org.apache.jackrabbit.oak.plugins.blob; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java index 190e03d271a..90545a498f3 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java @@ -36,9 +36,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.collections.IterableUtils; import org.apache.jackrabbit.oak.commons.collections.SetUtils; diff --git 
a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/TimeLapsedDataStore.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/TimeLapsedDataStore.java index 48b8d54b211..4a354affda7 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/TimeLapsedDataStore.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/TimeLapsedDataStore.java @@ -41,11 +41,11 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.NullOutputStream; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.MultiDataStoreAware; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCacheTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCacheTest.java index 9e89fc32f16..e24b4d53e9c 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCacheTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/UploadStagingCacheTest.java @@ -43,7 +43,7 @@ import ch.qos.logback.classic.Level; import org.apache.commons.io.FileUtils; -import 
org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.collections.ListUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreTest.java index ac4c1717289..ce87e66ce41 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/AbstractDataStoreTest.java @@ -30,13 +30,14 @@ import javax.jcr.RepositoryException; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.MultiDataStoreAware; import org.apache.jackrabbit.oak.commons.collections.SetUtils; import org.apache.jackrabbit.oak.commons.testing.RandomInputStream; +import org.apache.jackrabbit.oak.spi.blob.data.TestCaseBase; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -53,7 +54,7 @@ /** * Test base class for {@link DataStore} which covers all scenarios. - * Copied from {@link org.apache.jackrabbit.core.data.TestCaseBase}. + * Copied from {@link TestCaseBase}. 
*/ public abstract class AbstractDataStoreTest { diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerClusterSharedTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerClusterSharedTest.java index 7e36fc065cd..fa0f2b8e662 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerClusterSharedTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerClusterSharedTest.java @@ -30,7 +30,7 @@ import java.util.concurrent.ScheduledFuture; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore; import org.junit.After; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerTest.java index 2235bdee374..f436d0358a0 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobIdTrackerTest.java @@ -31,8 +31,8 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; diff --git 
a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobStoreStatsTestableFileDataStore.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobStoreStatsTestableFileDataStore.java index ec4f6d63c8c..ffcb14b77fc 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobStoreStatsTestableFileDataStore.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/BlobStoreStatsTestableFileDataStore.java @@ -26,9 +26,9 @@ import java.util.Iterator; import java.util.List; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordDownloadOptions; import org.apache.jackrabbit.oak.plugins.blob.datastore.directaccess.DataRecordUpload; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/CachingFileDataStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/CachingFileDataStoreTest.java index f04b30c3338..1cf1231a8aa 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/CachingFileDataStoreTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/CachingFileDataStoreTest.java @@ -24,8 +24,8 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; +import 
org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.MapUtils; import org.junit.After; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreStatsTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreStatsTest.java index 95930a4ee50..df68ab1ebf4 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreStatsTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreStatsTest.java @@ -39,10 +39,10 @@ import javax.jcr.RepositoryException; import javax.management.openmbean.CompositeData; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.apache.jackrabbit.oak.commons.IOUtils; import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreTest.java index f87c0aa2012..94fb6b77df9 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreTest.java +++ 
b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreBlobStoreTest.java @@ -31,10 +31,10 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.collections.IterableUtils; import org.apache.jackrabbit.oak.commons.collections.ListUtils; import org.apache.jackrabbit.oak.commons.collections.SetUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreServiceTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreServiceTest.java index 0f546bf979e..62be326ee81 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreServiceTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreServiceTest.java @@ -33,8 +33,8 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.spi.blob.SharedBackend; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreUtils.java 
b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreUtils.java index cf50eb24439..83195199a0b 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreUtils.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/DataStoreUtils.java @@ -24,9 +24,9 @@ import java.util.Properties; import java.util.Random; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.MapUtils; import org.junit.Test; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackendIT.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackendIT.java index 7bcf6623362..1ee6258b432 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackendIT.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/FSBackendIT.java @@ -33,10 +33,10 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import 
org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.MapUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecordTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecordTest.java index c8dc151f637..6883c798cdf 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecordTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/InMemoryDataRecordTest.java @@ -23,7 +23,7 @@ import java.util.Random; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.junit.Test; import static org.junit.Assert.assertEquals; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStoreTest.java index ed6b42960bd..5533190b0b0 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStoreTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStoreTest.java @@ -26,8 +26,8 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.collections.SetUtils; import org.junit.Rule; diff --git 
a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreTest.java index fd8546f2de6..705b5739770 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/SharedDataStoreTest.java @@ -37,10 +37,10 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.IteratorUtils; import org.apache.jackrabbit.oak.commons.collections.MapUtils; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderIT.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderIT.java index bcb4af5b548..45695bf2b56 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderIT.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderIT.java @@ -28,10 +28,10 @@ import javax.net.ssl.HttpsURLConnection; import org.apache.commons.io.IOUtils; -import 
org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.junit.Test; import static org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils.randomStream; diff --git a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java index 7f5b37e7535..e2b71803a18 100644 --- a/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java +++ b/oak-blob-plugins/src/test/java/org/apache/jackrabbit/oak/plugins/blob/datastore/directaccess/AbstractDataRecordAccessProviderTest.java @@ -36,10 +36,10 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.blob.BlobDownloadOptions; import org.apache.jackrabbit.util.Base64; import org.jetbrains.annotations.NotNull; diff --git a/oak-blob/pom.xml b/oak-blob/pom.xml index 70a8a5aa0f0..d9af0535508 100644 --- 
a/oak-blob/pom.xml +++ b/oak-blob/pom.xml @@ -39,8 +39,9 @@ org.apache.jackrabbit.oak.spi.blob, - org.apache.jackrabbit.oak.spi.blob.stats, - org.apache.jackrabbit.oak.spi.blob.split + org.apache.jackrabbit.oak.spi.blob.data, + org.apache.jackrabbit.oak.spi.blob.split, + org.apache.jackrabbit.oak.spi.blob.stats @@ -81,15 +82,16 @@ jcr 2.0 + + org.apache.jackrabbit - jackrabbit-data - ${jackrabbit.version} + oak-commons + ${project.version} - org.apache.jackrabbit - oak-commons + oak-jackrabbit-api ${project.version} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractDataRecord.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractDataRecord.java index 80ad145a6d9..7300620c483 100644 --- a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractDataRecord.java +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractDataRecord.java @@ -18,8 +18,8 @@ */ package org.apache.jackrabbit.oak.spi.blob; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; /** * Implements {@link DataRecord} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractSharedBackend.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractSharedBackend.java index 6b706caf930..a8e805c6207 100644 --- a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractSharedBackend.java +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/AbstractSharedBackend.java @@ -23,8 +23,8 @@ import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/SharedBackend.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/SharedBackend.java index c1ab12cf740..352d60fe6da 100644 --- a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/SharedBackend.java +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/SharedBackend.java @@ -23,9 +23,9 @@ import java.util.Iterator; import java.util.List; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; /** */ @@ -111,7 +111,7 @@ public interface SharedBackend { * * @param input the record input stream * @param name the name - * @throws org.apache.jackrabbit.core.data.DataStoreException + * @throws DataStoreException */ void addMetadataRecord(final InputStream input, final String name) throws DataStoreException; @@ -120,7 +120,7 @@ public interface SharedBackend { * * @param input the record file * @param name the name - * @throws org.apache.jackrabbit.core.data.DataStoreException + * @throws DataStoreException */ void addMetadataRecord(final File input, final String name) throws DataStoreException; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractBackend.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractBackend.java new file mode 100644 index 00000000000..aab0a6d68e1 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractBackend.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.jackrabbit.oak.spi.blob.data.util.NamedThreadFactory; + +/** + * Abstract Backend which has a reference to the underlying {@link CachingDataStore} and is + * maintaining the lifecycle of the internal asynchronous write executor. + */ +public abstract class AbstractBackend implements Backend { + + /** + * {@link CachingDataStore} instance using this backend. + */ + private CachingDataStore dataStore; + + /** + * path of repository home dir. + */ + private String homeDir; + + /** + * path of config property file. + */ + private String config; + + /** + * The pool size of asynchronous write pooling executor. + */ + private int asyncWritePoolSize = 10; + + /** + * Asynchronous write pooling executor. + */ + private volatile Executor asyncWriteExecutor; + + /** + * Returns the pool size of the asynchronous write pool executor. + * @return the pool size of the asynchronous write pool executor + */ + public int getAsyncWritePoolSize() { + return asyncWritePoolSize; + } + + /** + * Sets the pool size of the asynchronous write pool executor. 
+ * @param asyncWritePoolSize pool size of the async write pool executor + */ + public void setAsyncWritePoolSize(int asyncWritePoolSize) { + this.asyncWritePoolSize = asyncWritePoolSize; + } + + /** + * {@inheritDoc} + */ + @Override + public void init(CachingDataStore dataStore, String homeDir, String config) throws DataStoreException { + this.dataStore = dataStore; + this.homeDir = homeDir; + this.config = config; + } + + /** + * {@inheritDoc} + */ + @Override + public void close() throws DataStoreException { + Executor asyncExecutor = getAsyncWriteExecutor(); + + if (asyncExecutor != null && asyncExecutor instanceof ExecutorService) { + ((ExecutorService) asyncExecutor).shutdownNow(); + } + } + + /** + * Returns the {@link CachingDataStore} instance using this backend. + * @return the {@link CachingDataStore} instance using this backend + */ + protected CachingDataStore getDataStore() { + return dataStore; + } + + /** + * Sets the {@link CachingDataStore} instance using this backend. + * @param dataStore the {@link CachingDataStore} instance using this backend + */ + protected void setDataStore(CachingDataStore dataStore) { + this.dataStore = dataStore; + } + + /** + * Returns path of repository home dir. + * @return path of repository home dir + */ + protected String getHomeDir() { + return homeDir; + } + + /** + * Sets path of repository home dir. + * @param homeDir path of repository home dir + */ + protected void setHomeDir(String homeDir) { + this.homeDir = homeDir; + } + + /** + * Returns path of config property file. + * @return path of config property file + */ + protected String getConfig() { + return config; + } + + /** + * Sets path of config property file. + * @param config path of config property file + */ + protected void setConfig(String config) { + this.config = config; + } + + /** + * Returns Executor used to execute asynchronous write or touch jobs. 
+ * @return Executor used to execute asynchronous write or touch jobs + */ + protected Executor getAsyncWriteExecutor() { + Executor executor = asyncWriteExecutor; + + if (executor == null) { + synchronized (this) { + executor = asyncWriteExecutor; + if (executor == null) { + asyncWriteExecutor = executor = createAsyncWriteExecutor(); + } + } + } + + return executor; + } + + /** + * Creates an {@link Executor}. + * This method is invoked during the initialization for asynchronous write/touch job executions. + * @return an {@link Executor} + */ + protected Executor createAsyncWriteExecutor() { + Executor asyncExecutor; + + if (dataStore.getAsyncUploadLimit() > 0 && getAsyncWritePoolSize() > 0) { + asyncExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(getAsyncWritePoolSize(), + new NamedThreadFactory(getClass().getSimpleName() + "-write-worker")); + } else { + asyncExecutor = new ImmediateExecutor(); + } + + return asyncExecutor; + } + + /** + * This class implements {@link Executor} interface to run {@code command} right away, + * resulting in non-asynchronous mode executions. + */ + private class ImmediateExecutor implements Executor { + @Override + public void execute(Runnable command) { + command.run(); + } + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataRecord.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataRecord.java new file mode 100644 index 00000000000..947607accdc --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataRecord.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + + +/** + * Abstract data record base class. This base class contains only + * a reference to the data identifier of the record and implements + * the standard {@link Object} equality, hash code, and string + * representation methods based on the identifier. + */ +public abstract class AbstractDataRecord implements DataRecord { + + /** + * The data store that contains this record. + */ + private final AbstractDataStore store; + + /** + * The binary identifier; + */ + private final DataIdentifier identifier; + + /** + * Creates a data record with the given identifier. + * + * @param identifier data identifier + */ + public AbstractDataRecord( + AbstractDataStore store, DataIdentifier identifier) { + this.store = store; + this.identifier = identifier; + } + + /** + * Returns the data identifier. + * + * @return data identifier + */ + public DataIdentifier getIdentifier() { + return identifier; + } + + public String getReference() { + return store.getReferenceFromIdentifier(identifier); + } + + /** + * Returns the string representation of the data identifier. + * + * @return string representation + */ + public String toString() { + return identifier.toString(); + } + + /** + * Checks if the given object is a data record with the same identifier + * as this one. 
+ * + * @param object other object + * @return true if the other object is a data record and has + * the same identifier as this one, false otherwise + */ + public boolean equals(Object object) { + return (object instanceof DataRecord) + && identifier.equals(((DataRecord) object).getIdentifier()); + } + + /** + * Returns the hash code of the data identifier. + * + * @return hash code + */ + public int hashCode() { + return identifier.hashCode(); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataStore.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataStore.java new file mode 100644 index 00000000000..5aa5965d520 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AbstractDataStore.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.nio.charset.StandardCharsets; +import java.security.SecureRandom; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public abstract class AbstractDataStore implements DataStore { + + private static Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class); + + private static final String ALGORITHM = "HmacSHA1"; + + /** + * Array of hexadecimal digits. + */ + private static final char[] HEX = "0123456789abcdef".toCharArray(); + + /** + * The digest algorithm used to uniquely identify records. + */ + protected String DIGEST = System.getProperty("ds.digest.algorithm", "SHA-256"); + + /** + * Cached copy of the reference key of this data store. Initialized in + * {@link #getReferenceKey()} when the key is first accessed. + */ + private byte[] referenceKey = null; + + //---------------------------------------------------------< DataStore >-- + + public DataRecord getRecord(DataIdentifier identifier) + throws DataStoreException { + DataRecord record = getRecordIfStored(identifier); + if (record != null) { + return record; + } else { + throw new DataStoreException( + "Record " + identifier + " does not exist"); + } + } + + public DataRecord getRecordFromReference(String reference) + throws DataStoreException { + if (reference != null) { + int colon = reference.indexOf(':'); + if (colon != -1) { + DataIdentifier identifier = + new DataIdentifier(reference.substring(0, colon)); + if (reference.equals(getReferenceFromIdentifier(identifier))) { + return getRecordIfStored(identifier); + } + } + } + return null; + } + + //---------------------------------------------------------< protected >-- + + /** + * Returns the hex encoding of the given bytes. 
+ * + * @param value value to be encoded + * @return encoded value + */ + protected static String encodeHexString(byte[] value) { + char[] buffer = new char[value.length * 2]; + for (int i = 0; i < value.length; i++) { + buffer[2 * i] = HEX[(value[i] >> 4) & 0x0f]; + buffer[2 * i + 1] = HEX[value[i] & 0x0f]; + } + return new String(buffer); + } + + protected String getReferenceFromIdentifier(DataIdentifier identifier) { + try { + String id = identifier.toString(); + + Mac mac = Mac.getInstance(ALGORITHM); + mac.init(new SecretKeySpec(getReferenceKey(), ALGORITHM)); + byte[] hash = mac.doFinal(id.getBytes(StandardCharsets.UTF_8)); + + return id + ':' + encodeHexString(hash); + } catch (Exception e) { + LOG.error("Failed to hash identifier using MAC (Message Authentication Code) algorithm.", e); + } + return null; + } + + /** + * Returns the reference key of this data store. If one does not already + * exist, it is automatically created in an implementation-specific way. + * The default implementation simply creates a temporary random key that's + * valid only until the data store gets restarted. Subclasses can override + * and/or decorate this method to support a more persistent reference key. + *

+ * This method is called only once during the lifetime of a data store + * instance and the return value is cached in memory, so it's no problem + * if the implementation is slow. + * + * @return reference key + * @throws DataStoreException if the key is not available + */ + protected byte[] getOrCreateReferenceKey() throws DataStoreException { + byte[] referenceKeyValue = new byte[256]; + new SecureRandom().nextBytes(referenceKeyValue); + return referenceKeyValue; + } + + //-----------------------------------------------------------< private >-- + + /** + * Returns the reference key of this data store. Synchronized to + * control concurrent access to the cached {@link #referenceKey} value. + * + * @return reference key + * @throws DataStoreException if the key is not available + */ + private synchronized byte[] getReferenceKey() throws DataStoreException { + if (referenceKey == null) { + referenceKey = getOrCreateReferenceKey(); + } + return referenceKey; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchCallback.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchCallback.java new file mode 100644 index 00000000000..68c42c4e98b --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchCallback.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; +/** + * This interface defines callback methods to reflect the status of asynchronous + * touch. + */ +public interface AsyncTouchCallback { + + + /** + * Callback method for successful asynchronous touch. + */ + public void onSuccess(AsyncTouchResult result); + + /** + * Callback method for failed asynchronous touch. + */ + public void onFailure(AsyncTouchResult result); + + /** + * Callback method for aborted asynchronous touch. + */ + public void onAbort(AsyncTouchResult result); + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchResult.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchResult.java new file mode 100644 index 00000000000..0549d391b82 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncTouchResult.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +/** + * + * The class holds the result of asynchronous touch to {@link Backend} + */ +public class AsyncTouchResult { + /** + * {@link DataIdentifier} on which asynchronous touch is initiated. + */ + private final DataIdentifier identifier; + /** + * Any {@link Exception} which is raised in asynchronously touch. + */ + private Exception exception; + + public AsyncTouchResult(DataIdentifier identifier) { + super(); + this.identifier = identifier; + } + + public DataIdentifier getIdentifier() { + return identifier; + } + + public Exception getException() { + return exception; + } + + public void setException(Exception exception) { + this.exception = exception; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCache.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCache.java new file mode 100644 index 00000000000..658b7719aee --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCache.java @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class holds all in progress uploads. This class contains two data + * structures, one is {@link #asyncUploadMap} which is {@link Map} + * of file path vs lastModified of upload. The second {@link #toBeDeleted} is + * {@link Set} of upload which is marked for delete, while it is already + * in progress. Before starting an asynchronous upload, it requires to invoke + * {@link #add(String)} to add entry to {@link #asyncUploadMap}. After + * asynchronous upload completes, it requires to invoke + * {@link #remove(String)} to remove entry from + * {@link #asyncUploadMap} Any modification to this class are immediately + * persisted to local file system. {@link #asyncUploadMap} is persisted to / + * {@link homeDir}/ {@link #PENDIND_UPLOAD_FILE}. {@link #toBeDeleted} is + * persisted to / {@link homeDir}/ {@link #TO_BE_DELETED_UPLOAD_FILE}. The / + * {@link homeDir} refer to ${rep.home}. 
+ */ +public class AsyncUploadCache { + private static final Logger LOG = LoggerFactory.getLogger(AsyncUploadCache.class); + + /** + * {@link Map} of fileName Vs lastModified to store asynchronous upload. + */ + Map asyncUploadMap = new HashMap(); + + /** + * {@link Set} of fileName which are mark for delete during asynchronous + * Upload. + */ + Set toBeDeleted = new HashSet(); + + String path; + + String homeDir; + + int asyncUploadLimit; + + private File pendingUploads; + + private File toBeDeletedUploads; + + private static final String PENDIND_UPLOAD_FILE = "async-pending-uploads.ser"; + + private static final String TO_BE_DELETED_UPLOAD_FILE = "async-tobedeleted-uploads.ser"; + + /** + * This methods checks if file can be added to {@link #asyncUploadMap}. If + * yes it adds to {@link #asyncUploadMap} and + * {@link #serializeAsyncUploadMap()} the {@link #asyncUploadMap} to disk. + * + * @return {@link AsyncUploadCacheResult} if successfully added to + * asynchronous uploads it sets + * {@link AsyncUploadCacheResult#setAsyncUpload(boolean)} to true + * else sets to false. + */ + public synchronized AsyncUploadCacheResult add(String fileName) + throws IOException { + AsyncUploadCacheResult result = new AsyncUploadCacheResult(); + if (asyncUploadMap.entrySet().size() >= asyncUploadLimit) { + LOG.info( + "Async write limit [{}] reached. 
File [{}] not added to async write cache.", + asyncUploadLimit, fileName); + LOG.debug("AsyncUploadCache size=[{}] and entries =[{}]", + asyncUploadMap.size(), asyncUploadMap.keySet()); + result.setAsyncUpload(false); + } else { + long startTime = System.currentTimeMillis(); + if (toBeDeleted.remove(fileName)) { + serializeToBeDeleted(); + } + asyncUploadMap.put(fileName, System.currentTimeMillis()); + serializeAsyncUploadMap(); + LOG.debug("added file [{}] to asyncUploadMap upoad took [{}] sec", + fileName, ((System.currentTimeMillis() - startTime) / 1000)); + LOG.debug("AsyncUploadCache size=[{}] and entries =[{}]", + asyncUploadMap.size(), asyncUploadMap.keySet()); + result.setAsyncUpload(true); + } + return result; + } + + /** + * This methods removes file (if found) from {@link #asyncUploadMap}. If + * file is found, it immediately serializes the {@link #asyncUploadMap} to + * disk. This method sets + * {@link AsyncUploadCacheResult#setRequiresDelete(boolean)} to true, if + * asynchronous upload found to be in {@link #toBeDeleted} set i.e. marked + * for delete. + */ + public synchronized AsyncUploadCacheResult remove(String fileName) + throws IOException { + long startTime = System.currentTimeMillis(); + Long retVal = asyncUploadMap.remove(fileName); + if (retVal != null) { + serializeAsyncUploadMap(); + LOG.debug("removed file [{}] from asyncUploadMap took [{}] sec", + fileName, ((System.currentTimeMillis() - startTime) / 1000)); + LOG.debug("AsyncUploadCache size=[{}] and entries =[{}]", + asyncUploadMap.size(), asyncUploadMap.keySet()); + } else { + LOG.debug("cannot removed file [{}] from asyncUploadMap took [{}] sec. 
File not found.", + fileName, ((System.currentTimeMillis() - startTime) / 1000)); + LOG.debug("AsyncUploadCache size=[{}] and entries =[{}]", + asyncUploadMap.size(), asyncUploadMap.keySet()); + } + AsyncUploadCacheResult result = new AsyncUploadCacheResult(); + result.setRequiresDelete(toBeDeleted.contains(fileName)); + return result; + } + + /** + * This methods returns the in progress asynchronous uploads which are not + * marked for delete. + */ + public synchronized Set getAll() { + Set retVal = new HashSet(); + retVal.addAll(asyncUploadMap.keySet()); + retVal.removeAll(toBeDeleted); + return retVal; + } + + /** + * This methos checks if asynchronous upload is in progress for @param + * fileName. If @param touch is true, the lastModified is updated to current + * time. + */ + public synchronized boolean hasEntry(String fileName, boolean touch) + throws IOException { + boolean contains = asyncUploadMap.containsKey(fileName) + && !toBeDeleted.contains(fileName); + if (touch && contains) { + long timeStamp = System.currentTimeMillis(); + asyncUploadMap.put(fileName, timeStamp); + serializeAsyncUploadMap(); + } + return contains; + } + + /** + * Returns lastModified from {@link #asyncUploadMap} if found else returns + * 0. + */ + public synchronized long getLastModified(String fileName) { + return asyncUploadMap.get(fileName) != null + && !toBeDeleted.contains(fileName) + ? asyncUploadMap.get(fileName) + : 0; + } + + /** + * This methods deletes asynchronous upload for @param fileName if there + * exists asynchronous upload for @param fileName. + */ + public synchronized void delete(String fileName) throws IOException { + boolean serialize = false; + if (toBeDeleted.remove(fileName)) { + serialize = true; + } + if (asyncUploadMap.containsKey(fileName) && toBeDeleted.add(fileName)) { + serialize = true; + } + if (serialize) { + serializeToBeDeleted(); + } + } + + /** + * Delete in progress asynchronous uploads which are older than @param min. 
+ * This method leverages lastModified stored in {@link #asyncUploadMap} + */ + public synchronized Set deleteOlderThan(long min) + throws IOException { + min = min - 1000; + LOG.info("deleteOlderThan min [{}]", min); + Set deleteSet = new HashSet(); + for (Map.Entry entry : asyncUploadMap.entrySet()) { + if (entry.getValue() < min) { + deleteSet.add(entry.getKey()); + } + } + if (deleteSet.size() > 0) { + LOG.debug("deleteOlderThan set [{}]", deleteSet); + toBeDeleted.addAll(deleteSet); + serializeToBeDeleted(); + } + return deleteSet; + } + + /** + * @param homeDir + * home directory of repository. + * @param path + * path of the {@link LocalCache} + * @param asyncUploadLimit + * the maximum number of asynchronous uploads + */ + public synchronized void init(String homeDir, String path, + int asyncUploadLimit) throws IOException, ClassNotFoundException { + this.homeDir = homeDir; + this.path = path; + this.asyncUploadLimit = asyncUploadLimit; + LOG.info( + "AsynWriteCache:homeDir=[{}], path=[{}], asyncUploadLimit=[{}].", + new Object[] { homeDir, path, asyncUploadLimit }); + pendingUploads = new File(homeDir + "/" + PENDIND_UPLOAD_FILE); + toBeDeletedUploads = new File(homeDir + "/" + TO_BE_DELETED_UPLOAD_FILE); + if (pendingUploads.exists()) { + deserializeAsyncUploadMap(); + } else { + pendingUploads.createNewFile(); + asyncUploadMap = new HashMap(); + serializeAsyncUploadMap(); + } + + if (toBeDeletedUploads.exists()) { + deserializeToBeDeleted(); + } else { + toBeDeletedUploads.createNewFile(); + toBeDeleted = new HashSet(); + serializeToBeDeleted(); + } + } + + /** + * Reset the {@link AsyncUploadCache} to empty {@link #asyncUploadMap} and + * {@link #toBeDeleted} + */ + public synchronized void reset() throws IOException { + if (!pendingUploads.exists()) { + pendingUploads.createNewFile(); + } + pendingUploads.createNewFile(); + asyncUploadMap = new HashMap(); + serializeAsyncUploadMap(); + + if (!toBeDeletedUploads.exists()) { + 
toBeDeletedUploads.createNewFile(); + } + toBeDeletedUploads.createNewFile(); + toBeDeleted = new HashSet(); + serializeToBeDeleted(); + } + + /** + * Serialize {@link #asyncUploadMap} to local file system. + */ + private synchronized void serializeAsyncUploadMap() throws IOException { + + // use buffering + OutputStream fos = new FileOutputStream(pendingUploads); + OutputStream buffer = new BufferedOutputStream(fos); + ObjectOutput output = new ObjectOutputStream(buffer); + try { + output.writeObject(asyncUploadMap); + output.flush(); + } finally { + output.close(); + IOUtils.closeQuietly(buffer); + + } + } + + /** + * Deserialize {@link #asyncUploadMap} from local file system. + */ + private synchronized void deserializeAsyncUploadMap() throws IOException, + ClassNotFoundException { + // use buffering + InputStream fis = new FileInputStream(pendingUploads); + InputStream buffer = new BufferedInputStream(fis); + ObjectInput input = new ObjectInputStream(buffer); + try { + asyncUploadMap = (Map) input.readObject(); + } finally { + input.close(); + IOUtils.closeQuietly(buffer); + } + } + + /** + * Serialize {@link #toBeDeleted} to local file system. + */ + private synchronized void serializeToBeDeleted() throws IOException { + + // use buffering + OutputStream fos = new FileOutputStream(toBeDeletedUploads); + OutputStream buffer = new BufferedOutputStream(fos); + ObjectOutput output = new ObjectOutputStream(buffer); + try { + output.writeObject(toBeDeleted); + output.flush(); + } finally { + output.close(); + IOUtils.closeQuietly(buffer); + } + } + + /** + * Deserialize {@link #toBeDeleted} from local file system. 
+ */ + private synchronized void deserializeToBeDeleted() throws IOException, + ClassNotFoundException { + // use buffering + InputStream fis = new FileInputStream(toBeDeletedUploads); + InputStream buffer = new BufferedInputStream(fis); + ObjectInput input = new ObjectInputStream(buffer); + try { + toBeDeleted = (Set) input.readObject(); + } finally { + input.close(); + IOUtils.closeQuietly(buffer); + } + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCacheResult.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCacheResult.java new file mode 100644 index 00000000000..1e7cd80776f --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCacheResult.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; + +/** + * This class holds result of asynchronous upload from {@link AsyncUploadCache} + */ +public class AsyncUploadCacheResult { + + /** + * flag to indicate that asynchronous upload can be started on file. + */ + private boolean asyncUpload; + + /** + * flag to indicate that cached file requires to be deleted. 
It is + * applicable in case where file marked for delete before asynchronous + * upload completes. + */ + private boolean requiresDelete; + + private File file; + + /** + * Flag to denote that asynchronous upload can be started on file. + */ + public boolean canAsyncUpload() { + return asyncUpload; + } + + public void setAsyncUpload(boolean asyncUpload) { + this.asyncUpload = asyncUpload; + } + + /** + * Flag to indicate that record to be deleted from {@link DataStore}. + */ + public boolean doRequiresDelete() { + return requiresDelete; + } + + public void setRequiresDelete(boolean requiresDelete) { + this.requiresDelete = requiresDelete; + } + + public File getFile() { + return file; + } + + public void setFile(File file) { + this.file = file; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCallback.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCallback.java new file mode 100644 index 00000000000..c22e9f0ef64 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadCallback.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +/** + * This interface defines callback methods to reflect the status of asynchronous + * upload. + */ +public interface AsyncUploadCallback { + + /** + * Callback method for successful asynchronous upload. + */ + public void onSuccess(AsyncUploadResult result); + + /** + * Callback method for failed asynchronous upload. + */ + public void onFailure(AsyncUploadResult result); + + /** + * Callback method for aborted asynchronous upload. + */ + public void onAbort(AsyncUploadResult result); +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadResult.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadResult.java new file mode 100644 index 00000000000..e2645b89fdd --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AsyncUploadResult.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; + +/** + * + * The class holds the result of asynchronous upload to {@link Backend} + */ +public class AsyncUploadResult { + /** + * {@link DataIdentifier} on which asynchronous upload is initiated. + */ + private final DataIdentifier identifier; + + /** + * {@link File} which is asynchronously uploaded. + */ + private final File file; + + /** + * Any {@link Exception} which is raised in asynchronously upload. + */ + private Exception exception; + + public AsyncUploadResult(DataIdentifier identifier, File file) { + super(); + this.identifier = identifier; + this.file = file; + } + + public DataIdentifier getIdentifier() { + return identifier; + } + + public File getFile() { + return file; + } + + public Exception getException() { + return exception; + } + + public void setException(Exception exception) { + this.exception = exception; + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AutoClosingLazyFileInputStream.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AutoClosingLazyFileInputStream.java new file mode 100644 index 00000000000..44a27a258fc --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/AutoClosingLazyFileInputStream.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + +import org.apache.commons.io.input.AutoCloseInputStream; + +/** + * This input stream delays opening the file until the first byte is read, and + * closes and discards the underlying stream as soon as the end of input has + * been reached or when the stream is explicitly closed. + *

+ * This class differs from {@link org.apache.jackrabbit.util.LazyFileInputStream} + * in the auto-closing behavior. + *

+ * It is similar to {@link org.apache.jackrabbit.oak.commons.io.LazyInputStream} + * in that it delays opening the wrapped stream, but comes with additional + * handling of {@link File}s. + */ +public class AutoClosingLazyFileInputStream extends AutoCloseInputStream { + + /** + * The file to read from. + */ + protected final File file; + + /** + * True if the input stream was opened. It is also set to true if the stream + * was closed without reading (to avoid opening the file after the stream + * was closed). + */ + protected boolean opened; + + /** + * Creates a new LazyFileInputStream for the given file. If the + * file is unreadable, a FileNotFoundException is thrown. + * The file is not opened until the first byte is read from the stream. + * + * @param file the file + * @throws java.io.FileNotFoundException + */ + public AutoClosingLazyFileInputStream(File file) + throws FileNotFoundException { + super(null); + if (!file.canRead()) { + throw new FileNotFoundException(file.getPath()); + } + this.file = file; + } + + /** + * Open the stream if required. 
+ * + * @throws java.io.IOException + */ + protected void open() throws IOException { + if (!opened) { + opened = true; + in = new FileInputStream(file); + } + } + + public int read() throws IOException { + open(); + return super.read(); + } + + public int available() throws IOException { + open(); + return super.available(); + } + + public void close() throws IOException { + // make sure the file is not opened afterward + opened = true; + + // only close the file if it was in fact opened + if (in != null) { + super.close(); + } + } + + public synchronized void reset() throws IOException { + open(); + super.reset(); + } + + public boolean markSupported() { + try { + open(); + } catch (IOException e) { + throw new IllegalStateException(e.toString()); + } + return super.markSupported(); + } + + public synchronized void mark(int readlimit) { + try { + open(); + } catch (IOException e) { + throw new IllegalStateException(e.toString()); + } + super.mark(readlimit); + } + + public long skip(long n) throws IOException { + open(); + return super.skip(n); + } + + public int read(byte[] b) throws IOException { + open(); + return super.read(b, 0, b.length); + } + + public int read(byte[] b, int off, int len) throws IOException { + open(); + return super.read(b, off, len); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/Backend.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/Backend.java new file mode 100644 index 00000000000..e108302edb4 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/Backend.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.InputStream; +import java.util.Iterator; +import java.util.Set; + +/** + * The interface defines the backend which can be plugged into + * {@link CachingDataStore}. + */ +public interface Backend { + + /** + * This method initialize backend with the configuration. + * + * @param store + * {@link CachingDataStore} + * @param homeDir + * path of repository home dir. + * @param config + * path of config property file. + * @throws DataStoreException + */ + void init(CachingDataStore store, String homeDir, String config) + throws DataStoreException; + + /** + * Return inputstream of record identified by identifier. + * + * @param identifier + * identifier of record. + * @return inputstream of the record. + * @throws DataStoreException + * if record not found or any error. + */ + InputStream read(DataIdentifier identifier) throws DataStoreException; + + /** + * Return length of record identified by identifier. + * + * @param identifier + * identifier of record. + * @return length of the record. + * @throws DataStoreException + * if record not found or any error. + */ + long getLength(DataIdentifier identifier) throws DataStoreException; + + /** + * Return lastModified of record identified by identifier. + * + * @param identifier + * identifier of record. + * @return lastModified of the record. 
+ * @throws DataStoreException + * if record not found or any error. + */ + long getLastModified(DataIdentifier identifier) throws DataStoreException; + + /** + * Stores file to backend with identifier used as key. If key pre-exists, it + * updates the timestamp of the key. + * + * @param identifier + * key of the file + * @param file + * file that would be stored in backend. + * @throws DataStoreException + * for any error. + */ + void write(DataIdentifier identifier, File file) throws DataStoreException; + + /** + * Write file to backend in asynchronous mode. + * + * @param identifier + * @param file + * @param callback + * Callback interface to called after upload succeed or failed. + * @throws DataStoreException + */ + void writeAsync(DataIdentifier identifier, File file, + AsyncUploadCallback callback) throws DataStoreException; + + /** + * Returns identifiers of all records that exists in backend. + * + * @return iterator consisting of all identifiers + * @throws DataStoreException + */ + Iterator getAllIdentifiers() throws DataStoreException; + + /** + * This method check the existence of record in backend. Return true if + * records exists else false. This method also touch record identified by + * identifier if touch is true. + * + * @param identifier + * @throws DataStoreException + */ + boolean exists(DataIdentifier identifier, boolean touch) + throws DataStoreException; + + /** + * This method check the existence of record in backend. + * + * @param identifier + * identifier to be checked. + * @return true if records exists else false. + * @throws DataStoreException + */ + boolean exists(DataIdentifier identifier) throws DataStoreException; + + /** + * Update the lastModified of record if it's lastModified < minModifiedDate. 
+ * + * @param identifier + * @param minModifiedDate + * @throws DataStoreException + */ + void touch(final DataIdentifier identifier, long minModifiedDate) + throws DataStoreException; + + /** + * Update the lastModified of record if it's lastModified < minModifiedDate + * asynchronously. Result of update is passed using appropriate + * {@link AsyncTouchCallback} methods. If identifier's lastModified > + * minModified {@link AsyncTouchCallback#onAbort(AsyncTouchResult)} is + * called. Any exception is communicated through + * {@link AsyncTouchCallback#onFailure(AsyncTouchResult)} . On successful + * update of lastModified, + * {@link AsyncTouchCallback#onSuccess(AsyncTouchResult)} + * is invoked. + * + * @param identifier + * @param minModifiedDate + * @param callback + * @throws DataStoreException + */ + void touchAsync(final DataIdentifier identifier, long minModifiedDate, + final AsyncTouchCallback callback) throws DataStoreException; + + /** + * Close backend and release resources like database connection if any. + * + * @throws DataStoreException + */ + void close() throws DataStoreException; + + /** + * Delete all records which are older than timestamp. + * + * @param timestamp + * @return {@link Set} of identifiers which are deleted. + * @throws DataStoreException + */ + Set deleteAllOlderThan(long timestamp) + throws DataStoreException; + + /** + * Delete record identified by identifier. No-op if identifier not found. 
+ * + * @param identifier + * @throws DataStoreException + */ + void deleteRecord(DataIdentifier identifier) throws DataStoreException; +} + diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/BackendResourceAbortable.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/BackendResourceAbortable.java new file mode 100644 index 00000000000..dda44db70e5 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/BackendResourceAbortable.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.InputStream; + +/** + * {@link Backend} resource abstraction, such as {@link InputStream}, which can be aborted without consuming it + * fully for efficiency. + *

+ * Some {@link Backend} implementations such as S3Backend may return an abortable InputStream + * for a more optimal resource use. S3Backend internally uses Apache HttpClient library which tries + * to reuse HTTP connections by reading data fully to the end of an attached InputStream on {@link InputStream#close()} + * by default. It can be efficient from a socket pool management perspective, but possibly a significant overhead + * while bytes are read from S3 just to be discarded. So, a {@link Backend} implementation that retrieves an abortable + * resource may decide to wrap the underlying resource (e.g, InputStream) by this interface (e.g, + * S3BackendResourceAbortableInputStream) in order to abort the underlying resources (e.g, http request + * object) without having to read data fully. + */ +public interface BackendResourceAbortable { + + /** + * Abort the underlying backend resource(s). + */ + public void abort(); + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataRecord.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataRecord.java new file mode 100644 index 00000000000..1d45ca323bc --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataRecord.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.InputStream; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * CachingDataRecord which stores reference to {@link CachingDataStore}. This + * class doesn't store any references to attributes but attributes are fetched + * on demand from {@link CachingDataStore}. + */ +public class CachingDataRecord extends AbstractDataRecord { + + private static final Logger LOG = LoggerFactory.getLogger(CachingDataRecord.class); + + private final CachingDataStore store; + + public CachingDataRecord(CachingDataStore store, DataIdentifier identifier) { + super(store, identifier); + this.store = store; + } + + @Override + public long getLastModified() { + try { + return store.getLastModified(getIdentifier()); + } catch (DataStoreException dse) { + LOG.info("exception in getLastModified for identifier [" + + getIdentifier() + "]. 
returning 0.", dse); + return 0; + } + } + + @Override + public long getLength() throws DataStoreException { + return store.getLength(getIdentifier()); + } + + @Override + public InputStream getStream() throws DataStoreException { + return store.getStream(getIdentifier()); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataStore.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataStore.java new file mode 100644 index 00000000000..fbd963eff42 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingDataStore.java @@ -0,0 +1,1398 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.ref.WeakReference; +import java.nio.charset.StandardCharsets; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.WeakHashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.oak.spi.blob.data.util.NamedThreadFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A caching data store that consists of {@link LocalCache} and {@link Backend}. + * {@link Backend} is single source of truth. All methods first try to fetch + * information from {@link LocalCache}. If record is not available in + * {@link LocalCache}, then it is fetched from {@link Backend} and saved to + * {@link LocalCache} for further access. This class is designed to work without + * {@link LocalCache} and then all information is fetched from {@link Backend}. + * To disable {@link LocalCache} set {@link #setCacheSize(long)} to 0. * + * Configuration: + * + *

+ * <DataStore class="org.apache.jackrabbit.aws.ext.ds.CachingDataStore">
+ * 
+ *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
+ *     <param name="{@link #setConfig(String) config}" value="${rep.home}/backend.properties"/>
+ *     <param name="{@link #setCacheSize(long) cacheSize}" value="68719476736"/>
+ *     <param name="{@link #setSecret(String) secret}" value="123456"/>
+ *     <param name="{@link #setCachePurgeTrigFactor(double)}" value="0.95d"/>
+ *     <param name="{@link #setCachePurgeResizeFactor(double) cacheSize}" value="0.85d"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ *     <param name="{@link #setContinueOnAsyncUploadFailure(boolean) continueOnAsyncUploadFailure}" value="false"/>
+ *     <param name="{@link #setConcurrentUploadsThreads(int) concurrentUploadsThreads}" value="10"/>
+ *     <param name="{@link #setAsyncUploadLimit(int) asyncUploadLimit}" value="100"/>
+ *     <param name="{@link #setUploadRetries(int) uploadRetries}" value="3"/>
+ *     <param name="{@link #setTouchAsync(boolean) touchAsync}" value="false"/>
+ *     <param name="{@link #setProactiveCaching(boolean) proactiveCaching}" value="true"/>
+ *     <param name="{@link #setRecLengthCacheSize(int) recLengthCacheSize}" value="200"/>
+ * </DataStore>
+ * 
+ */ +public abstract class CachingDataStore extends AbstractDataStore implements + MultiDataStoreAware, AsyncUploadCallback, AsyncTouchCallback { + + /** + * Logger instance. + */ + private static final Logger LOG = LoggerFactory.getLogger(CachingDataStore.class); + + private static final String DS_STORE = ".DS_Store"; + + /** + * Name of the directory used for temporary files. Must be at least 3 + * characters. + */ + private static final String TMP = "tmp"; + + /** + * All data identifiers that are currently in use are in this set until they + * are garbage collected. + */ + protected Map> inUse = Collections.synchronizedMap(new WeakHashMap>()); + + /** + * In memory map to hold failed asynchronous upload {@link DataIdentifier} + * and its retry count. Once if all retries are exhausted or file is + * successfully uploaded, then corresponding entry is flushed from the map. + * As all failed uploads are synchronously uploaded at startup, this map + * is not required to be persisted. + */ + protected final Map uploadRetryMap = new ConcurrentHashMap(5); + + /** + * In memory map to hold in-progress asynchronous touch. Once touch is + * successful corresponding entry is flushed from the map. + */ + protected final Map asyncTouchCache = new ConcurrentHashMap(5); + + /** + * In memory map to hold in-progress asynchronous downloads. Once + * download is finished corresponding entry is flushed from the map. + */ + protected final Map asyncDownloadCache = new ConcurrentHashMap(5); + + /** + * In memory cache to hold {@link DataRecord#getLength()} against + * {@link DataIdentifier} + */ + protected Map recLenCache = null; + + protected Backend backend; + + /** + * The minimum size of an object that should be stored in this data store. + */ + private int minRecordLength = 16 * 1024; + + private String path; + + private File directory; + + private File tmpDir; + + private String secret; + + /** + * Flag to indicate if lastModified is updated asynchronously. 
+ */ + private boolean touchAsync = false; + + /** + * Flag to indicate that binary content will be cached proactively and + * asynchronously when binary metadata is retrieved from {@link Backend}. + */ + private boolean proactiveCaching = true; + + /** + * The optional backend configuration. + */ + private String config; + + /** + * The minimum modified date. If a file is accessed (read or write) with a + * modified date older than this value, the modified date is updated to the + * current time. + */ + private long minModifiedDate; + + /** + * Cache purge trigger factor. Cache will undergo in auto-purge mode if + * cache current size is greater than cachePurgeTrigFactor * cacheSize + */ + private double cachePurgeTrigFactor = 0.95d; + + /** + * Cache resize factor. After auto-purge mode, cache current size would just + * greater than cachePurgeResizeFactor * cacheSize cacheSize + */ + private double cachePurgeResizeFactor = 0.85d; + + /** + * The number of bytes in the cache. The default value is 64 GB. + */ + private long cacheSize = 64L * 1024 * 1024 * 1024; + + /** + * The number of retries for failed upload. + */ + private int uploadRetries = 3; + + /** + * The local file system cache. + */ + private LocalCache cache; + + /** + * Caching holding pending uploads + */ + private AsyncUploadCache asyncWriteCache; + + /** + * {@link ExecutorService} to asynchronous downloads + */ + private ExecutorService downloadExecService; + + protected abstract Backend createBackend(); + + protected abstract String getMarkerFile(); + + /** + * In {@link #init(String)},it resumes all incomplete asynchronous upload + * from {@link AsyncUploadCache} and uploads them concurrently in multiple + * threads. It throws {@link RepositoryException}, if file is not found in + * local cache for that asynchronous upload. As far as code is concerned, it + * is only possible when somebody has removed files from local cache + * manually. 
If there is an exception and user want to proceed with + * inconsistencies, set parameter continueOnAsyncUploadFailure to true in + * repository.xml. This will ignore {@link RepositoryException} and log all + * missing files and proceed after resetting {@link AsyncUploadCache} . + */ + private boolean continueOnAsyncUploadFailure; + + /** + * The {@link #init(String)} methods checks for {@link #getMarkerFile()} and + * if it doesn't exists migrates all files from fileystem to {@link Backend} + * . This parameter governs number of threads which will upload files + * concurrently to {@link Backend}. + */ + private int concurrentUploadsThreads = 10; + + /** + * This parameter limits the number of asynchronous uploads slots to + * {@link Backend}. Once this limit is reached, further uploads to + * {@link Backend} are synchronous, till one of asynchronous uploads + * completes and make asynchronous uploads slot available. To disable + * asynchronous upload, set {@link #asyncUploadLimit} parameter to 0 in + * repository.xml. By default it is 100 + */ + private int asyncUploadLimit = 100; + + /** + * Size of {@link #recLenCache}. Each entry consumes of approx 140 bytes. + * Default total memory consumption of {@link #recLenCache} 28KB. + */ + private int recLengthCacheSize = 200; + + /** + * Initialized the data store. If the path is not set, <repository + * home>/repository/datastore is used. This directory is automatically + * created if it does not yet exist. During first initialization, it upload + * all files from local datastore to backed and local datastore act as a + * local cache. 
+ */ + @Override + public void init(String homeDir) throws RepositoryException { + try { + if (path == null) { + path = homeDir + "/repository/datastore"; + } + // create tmp inside path + tmpDir = new File(path, "tmp"); + LOG.info("path=[{}], tmpPath=[{}]", path, tmpDir.getAbsolutePath()); + directory = new File(path); + mkdirs(directory); + mkdirs(new File(homeDir)); + + if (!mkdirs(tmpDir)) { + FileUtils.cleanDirectory(tmpDir); + LOG.info("tmp=[{}] cleaned.", tmpDir.getPath()); + } + boolean asyncWriteCacheInitStatus = true; + try { + asyncWriteCache = new AsyncUploadCache(); + asyncWriteCache.init(homeDir, path, asyncUploadLimit); + } catch (Exception e) { + LOG.warn("Failed to initialize asyncWriteCache", e); + asyncWriteCacheInitStatus = false; + } + backend = createBackend(); + backend.init(this, path, config); + String markerFileName = getMarkerFile(); + if (markerFileName != null && !"".equals(markerFileName.trim())) { + // create marker file in homeDir to avoid deletion in cache + // cleanup. + File markerFile = new File(homeDir, markerFileName); + if (!markerFile.exists()) { + LOG.info("load files from local cache"); + uploadFilesFromCache(); + try { + markerFile.createNewFile(); + } catch (IOException e) { + throw new DataStoreException( + "Could not create marker file " + + markerFile.getAbsolutePath(), e); + } + } else { + LOG.info("marker file = [{}] exists ", + markerFile.getAbsolutePath()); + if (!asyncWriteCacheInitStatus) { + LOG.info("Initialization of asyncWriteCache failed. " + + "Re-loading all files from local cache"); + uploadFilesFromCache(); + asyncWriteCache.reset(); + } + } + } else { + throw new DataStoreException("Failed to intialized DataStore." + + " MarkerFileName is null or empty. 
"); + } + // upload any leftover async uploads to backend during last shutdown + Set fileList = asyncWriteCache.getAll(); + if (fileList != null && !fileList.isEmpty()) { + List errorFiles = new ArrayList(); + LOG.info("Uploading [{}] and size=[{}] from AsyncUploadCache.", + fileList, fileList.size()); + long totalSize = 0; + List files = new ArrayList(fileList.size()); + for (String fileName : fileList) { + File f = new File(path, fileName); + if (!f.exists()) { + errorFiles.add(fileName); + LOG.error( + "Cannot upload pending file [{}]. File doesn't exist.", + f.getAbsolutePath()); + } else { + totalSize += f.length(); + files.add(new File(path, fileName)); + } + } + new FilesUploader(files, totalSize, concurrentUploadsThreads, + true).upload(); + if (!continueOnAsyncUploadFailure && errorFiles.size() > 0) { + LOG.error( + "Pending uploads of files [{}] failed. Files do not exist in Local cache.", + errorFiles); + LOG.error("To continue set [continueOnAsyncUploadFailure] " + + "to true in Datastore configuration in " + + "repository.xml. There would be inconsistent data " + + "in repository due the missing files. "); + throw new RepositoryException( + "Cannot upload async uploads from local cache. Files not found."); + } else { + if (errorFiles.size() > 0) { + LOG.error( + "Pending uploads of files [{}] failed. Files do" + + " not exist in Local cache. 
Continuing as " + + "[continueOnAsyncUploadFailure] is set to true.", + errorFiles); + } + LOG.info("Reseting AsyncWrite Cache list."); + asyncWriteCache.reset(); + } + } + downloadExecService = Executors.newFixedThreadPool(5, + new NamedThreadFactory("backend-file-download-worker")); + cache = new LocalCache(path, tmpDir.getAbsolutePath(), cacheSize, + cachePurgeTrigFactor, cachePurgeResizeFactor, asyncWriteCache); + /* + * Initialize LRU cache of size {@link #recLengthCacheSize} + */ + recLenCache = Collections.synchronizedMap(new LinkedHashMap( + recLengthCacheSize, 0.75f, true) { + + private static final long serialVersionUID = -8752749075395630485L; + + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { + if (size() > recLengthCacheSize) { + LOG.trace("evicted from recLengthCache [{}]", + eldest.getKey()); + return true; + } + return false; + } + }); + } catch (Exception e) { + throw new RepositoryException(e); + } + } + + /** + * Creates a new data record in {@link Backend}. The stream is first + * consumed and the contents are saved in a temporary file and the {@link #DIGEST} + * message digest of the stream is calculated. If a record with the same + * {@link #DIGEST} digest (and length) is found then it is returned. Otherwise new + * record is created in {@link Backend} and the temporary file is moved in + * place to {@link LocalCache}. + * + * @param input + * binary stream + * @return {@link CachingDataRecord} + * @throws DataStoreException + * if the record could not be created. 
+ */ + @Override + public DataRecord addRecord(InputStream input) throws DataStoreException { + File temporary = null; + long startTime = System.currentTimeMillis(); + long length = 0; + try { + temporary = newTemporaryFile(); + DataIdentifier tempId = new DataIdentifier(temporary.getName()); + usesIdentifier(tempId); + // Copy the stream to the temporary file and calculate the + // stream length and the message digest of the stream + MessageDigest digest = MessageDigest.getInstance(DIGEST); + OutputStream output = new DigestOutputStream(new FileOutputStream( + temporary), digest); + try { + length = IOUtils.copyLarge(input, output); + } finally { + output.close(); + } + long currTime = System.currentTimeMillis(); + DataIdentifier identifier = new DataIdentifier( + encodeHexString(digest.digest())); + LOG.debug("Digest of [{}], length =[{}] took [{}]ms ", + new Object[] { identifier, length, (currTime - startTime) }); + String fileName = getFileName(identifier); + AsyncUploadCacheResult result = null; + synchronized (this) { + usesIdentifier(identifier); + // check if async upload is already in progress + if (!asyncWriteCache.hasEntry(fileName, true)) { + result = cache.store(fileName, temporary, true); + } + } + LOG.debug("storing [{}] in localCache took [{}] ms", identifier, + (System.currentTimeMillis() - currTime)); + if (result != null) { + if (result.canAsyncUpload()) { + backend.writeAsync(identifier, result.getFile(), this); + } else { + backend.write(identifier, result.getFile()); + } + } + // this will also make sure that + // tempId is not garbage collected until here + inUse.remove(tempId); + LOG.debug("addRecord [{}] of length [{}] took [{}]ms.", + new Object[] { identifier, length, + (System.currentTimeMillis() - startTime) }); + return new CachingDataRecord(this, identifier); + } catch (NoSuchAlgorithmException e) { + throw new DataStoreException(DIGEST + " not available", e); + } catch (IOException e) { + throw new DataStoreException("Could not add 
record", e); + } finally { + if (temporary != null) { + // try to delete - but it's not a big deal if we can't + temporary.delete(); + } + } + } + + @Override + public DataRecord getRecord(DataIdentifier identifier) + throws DataStoreException { + String fileName = getFileName(identifier); + try { + if (getLength(identifier) > -1) { + LOG.trace("getRecord: [{}] retrieved using getLength", + identifier); + if (minModifiedDate > 0) { + touchInternal(identifier); + } + usesIdentifier(identifier); + return new CachingDataRecord(this, identifier); + } else if (asyncWriteCache.hasEntry(fileName, minModifiedDate > 0)) { + LOG.trace("getRecord: [{}] retrieved from asyncUploadmap", + identifier); + usesIdentifier(identifier); + return new CachingDataRecord(this, identifier); + } + } catch (IOException ioe) { + throw new DataStoreException("error in getting record [" + + identifier + "]", ioe); + } + throw new DataStoreException("Record not found: " + identifier); + } + + /** + * Get a data record for the given identifier or null it data record doesn't + * exist in {@link Backend} + * + * @param identifier identifier of record. + * @return the {@link CachingDataRecord} or null. 
+ */ + @Override + public DataRecord getRecordIfStored(DataIdentifier identifier) + throws DataStoreException { + String fileName = getFileName(identifier); + try { + if (asyncWriteCache.hasEntry(fileName, minModifiedDate > 0)) { + LOG.trace( + "getRecordIfStored: [{}] retrieved from asyncuploadmap", + identifier); + usesIdentifier(identifier); + return new CachingDataRecord(this, identifier); + } else if (recLenCache.containsKey(identifier)) { + LOG.trace( + "getRecordIfStored: [{}] retrieved using recLenCache", + identifier); + if (minModifiedDate > 0) { + touchInternal(identifier); + } + usesIdentifier(identifier); + return new CachingDataRecord(this, identifier); + } else { + try { + long length = backend.getLength(identifier); + LOG.debug( + "getRecordIfStored :[{}] retrieved from backend", + identifier); + recLenCache.put(identifier, length); + if (minModifiedDate > 0) { + touchInternal(identifier); + } + usesIdentifier(identifier); + return new CachingDataRecord(this, identifier); + } catch (DataStoreException ignore) { + LOG.warn(" getRecordIfStored: [{}] not found", identifier); + } + + } + } catch (IOException ioe) { + throw new DataStoreException(ioe); + } + return null; + } + + @Override + public void updateModifiedDateOnAccess(long before) { + LOG.info("minModifiedDate set to [{}]", before); + minModifiedDate = before; + } + + /** + * Retrieves all identifiers from {@link Backend}. 
+ */ + @Override + public Iterator getAllIdentifiers() + throws DataStoreException { + Set ids = new HashSet(); + for (String fileName : asyncWriteCache.getAll()) { + ids.add(getIdentifier(fileName)); + } + Iterator itr = backend.getAllIdentifiers(); + while (itr.hasNext()) { + ids.add(itr.next()); + } + return ids.iterator(); + } + + /** + * This method deletes record from {@link Backend} and then from + * {@link LocalCache} + */ + @Override + public void deleteRecord(DataIdentifier identifier) + throws DataStoreException { + String fileName = getFileName(identifier); + synchronized (this) { + try { + // order is important here + recLenCache.remove(identifier); + asyncWriteCache.delete(fileName); + backend.deleteRecord(identifier); + cache.delete(fileName); + } catch (IOException ioe) { + throw new DataStoreException(ioe); + } + } + } + + @Override + public synchronized int deleteAllOlderThan(long min) + throws DataStoreException { + Set diSet = backend.deleteAllOlderThan(min); + + // remove entries from local cache + for (DataIdentifier identifier : diSet) { + recLenCache.remove(identifier); + cache.delete(getFileName(identifier)); + } + try { + for (String fileName : asyncWriteCache.deleteOlderThan(min)) { + diSet.add(getIdentifier(fileName)); + } + } catch (IOException e) { + throw new DataStoreException(e); + } + LOG.info( + "deleteAllOlderThan exit. Deleted [{}]records. Number of records deleted [{}]", + diSet, diSet.size()); + return diSet.size(); + } + + /** + * Get stream of record from {@link LocalCache}. If record is not available + * in {@link LocalCache}, this method fetches record from {@link Backend} + * and stores it to {@link LocalCache}. Stream is then returned from cached + * record. 
+ */ + InputStream getStream(DataIdentifier identifier) throws DataStoreException { + InputStream in = null; + try { + String fileName = getFileName(identifier); + InputStream cached = cache.getIfStored(fileName); + if (cached != null) { + return cached; + } + in = backend.read(identifier); + return cache.store(fileName, in); + } catch (IOException e) { + throw new DataStoreException("IO Exception: " + identifier, e); + } finally { + IOUtils.closeQuietly(in); + } + } + + /** + * Return lastModified of record from {@link Backend} assuming + * {@link Backend} as a single source of truth. + */ + public long getLastModified(DataIdentifier identifier) + throws DataStoreException { + String fileName = getFileName(identifier); + long lastModified = asyncWriteCache.getLastModified(fileName); + if (lastModified != 0) { + LOG.trace( + "identifier [{}], lastModified=[{}] retrireved from AsyncUploadCache ", + identifier, lastModified); + + } else if (asyncTouchCache.get(identifier) != null) { + lastModified = asyncTouchCache.get(identifier); + LOG.trace( + "identifier [{}], lastModified=[{}] retrireved from asyncTouchCache ", + identifier, lastModified); + } else { + lastModified = backend.getLastModified(identifier); + LOG.debug( + "identifier [{}], lastModified=[{}] retrireved from backend ", + identifier, lastModified); + asyncDownload(identifier); + } + return lastModified; + } + + /** + * Return the length of record from {@link LocalCache} if available, + * otherwise retrieve it from {@link Backend}. 
+ */ + public long getLength(final DataIdentifier identifier) + throws DataStoreException { + String fileName = getFileName(identifier); + + Long length = recLenCache.get(identifier); + if (length != null) { + LOG.trace(" identifier [{}] length fetched from recLengthCache", + identifier); + return length; + } else if ((length = cache.getFileLength(fileName)) != null) { + LOG.trace(" identifier [{}] length fetched from local cache", + identifier); + recLenCache.put(identifier, length); + return length; + } else { + length = backend.getLength(identifier); + LOG.debug(" identifier [{}] length fetched from backend", + identifier); + recLenCache.put(identifier, length); + asyncDownload(identifier); + return length; + } + } + + @Override + protected byte[] getOrCreateReferenceKey() throws DataStoreException { + return secret.getBytes(StandardCharsets.UTF_8); + } + + public Set getPendingUploads() { + return asyncWriteCache.getAll(); + } + + + public void deleteFromCache(DataIdentifier identifier) + throws DataStoreException { + try { + // order is important here + recLenCache.remove(identifier); + String fileName = getFileName(identifier); + asyncWriteCache.delete(fileName); + cache.delete(fileName); + } catch (IOException ioe) { + throw new DataStoreException(ioe); + } + } + + @Override + public void onSuccess(AsyncUploadResult result) { + DataIdentifier identifier = result.getIdentifier(); + File file = result.getFile(); + String fileName = getFileName(identifier); + try { + LOG.debug("Upload completed for [{}]", identifier); + // remove from failed upload map if any. + uploadRetryMap.remove(identifier); + AsyncUploadCacheResult cachedResult = asyncWriteCache.remove(fileName); + if (cachedResult.doRequiresDelete()) { + // added record already marked for delete + deleteRecord(identifier); + } else { + // async upload took lot of time. + // getRecord to touch if required. 
+ getRecord(identifier); + } + } catch (IOException ie) { + LOG.warn("Cannot remove pending file upload. Dataidentifer [ " + + identifier + "], file [" + file.getAbsolutePath() + "]", ie); + } catch (DataStoreException dse) { + LOG.warn("Cannot remove pending file upload. Dataidentifer [ " + + identifier + "], file [" + file.getAbsolutePath() + "]", dse); + } + } + + @Override + public void onFailure(AsyncUploadResult result) { + DataIdentifier identifier = result.getIdentifier(); + File file = result.getFile(); + String fileName = getFileName(identifier); + if (result.getException() != null) { + LOG.warn("Async Upload failed. Dataidentifer [ " + identifier + + "], file [" + file.getAbsolutePath() + "]", + result.getException()); + } else { + LOG.warn("Async Upload failed. Dataidentifer [ " + identifier + + "], file [" + file.getAbsolutePath() + "]"); + } + // Retry failed upload upto uploadRetries times. + try { + if (asyncWriteCache.hasEntry(fileName, false)) { + synchronized (uploadRetryMap) { + Integer retry = uploadRetryMap.get(identifier); + if (retry == null) { + retry = new Integer(1); + } else { + retry++; + } + if (retry <= uploadRetries) { + uploadRetryMap.put(identifier, retry); + LOG.info( + "Retrying [{}] times failed upload for dataidentifer {}", + retry, identifier); + try { + backend.writeAsync(identifier, file, this); + } catch (DataStoreException e) { + LOG.warn("exception", e); + } + } else { + LOG.info("Retries [{}] exhausted for dataidentifer {}.", + (retry - 1), identifier); + uploadRetryMap.remove(identifier); + } + } + } + } catch (IOException ie) { + LOG.warn("Cannot retry failed async file upload. Dataidentifer [ " + + identifier + "], file [" + file.getAbsolutePath() + "]", ie); + } + } + + @Override + public void onAbort(AsyncUploadResult result) { + DataIdentifier identifier = result.getIdentifier(); + File file = result.getFile(); + String fileName = getFileName(identifier); + try { + // remove from failed upload map if any. 
+ uploadRetryMap.remove(identifier); + asyncWriteCache.remove(fileName); + LOG.info( + "Async Upload Aborted. Dataidentifer [{}], file [{}] removed from AsyncCache.", + identifier, file.getAbsolutePath()); + } catch (IOException ie) { + LOG.warn("Cannot remove pending file upload. Dataidentifer [ " + + identifier + "], file [" + file.getAbsolutePath() + "]", ie); + } + } + + + @Override + public void onSuccess(AsyncTouchResult result) { + asyncTouchCache.remove(result.getIdentifier()); + LOG.debug(" Async Touch succeed. Removed [{}] from asyncTouchCache", + result.getIdentifier()); + + } + + @Override + public void onFailure(AsyncTouchResult result) { + LOG.warn(" Async Touch failed. Not removing [{}] from asyncTouchCache", + result.getIdentifier()); + if (result.getException() != null) { + LOG.debug(" Async Touch failed. exception", result.getException()); + } + } + + @Override + public void onAbort(AsyncTouchResult result) { + asyncTouchCache.remove(result.getIdentifier()); + LOG.debug(" Async Touch aborted. Removed [{}] from asyncTouchCache", + result.getIdentifier()); + } + + /** + * Method to confirm that identifier can be deleted from {@link Backend} + * + * @param identifier + * @return + */ + public boolean confirmDelete(DataIdentifier identifier) { + if (isInUse(identifier)) { + LOG.debug("identifier [{}] is inUse confirmDelete= false ", + identifier); + return false; + } + + String fileName = getFileName(identifier); + long lastModified = asyncWriteCache.getLastModified(fileName); + if (lastModified != 0) { + LOG.debug( + "identifier [{}] is asyncWriteCache map confirmDelete= false ", + identifier); + return false; + + } + if (asyncTouchCache.get(identifier) != null) { + LOG.debug( + "identifier [{}] is asyncTouchCache confirmDelete = false ", + identifier); + return false; + } + + return true; + } + + /** + * Internal method to touch identifier in @link {@link Backend}. if + * {@link #touchAsync}, the record is updated asynchronously. 
+ * + * @param identifier + * @throws DataStoreException + */ + private void touchInternal(DataIdentifier identifier) + throws DataStoreException { + + if (touchAsync) { + Long lastModified = asyncTouchCache.put(identifier, + System.currentTimeMillis()); + + if (lastModified == null) { + LOG.debug("Async touching [{}] ", identifier); + backend.touchAsync(identifier, minModifiedDate, this); + } else { + LOG.debug( "Touched in asyncTouchMap [{}]", identifier); + } + + } else { + backend.touch(identifier, minModifiedDate); + } + } + + /** + * Invoke {@link #getStream(DataIdentifier)} asynchronously to cache binary + * asynchronously. + */ + private void asyncDownload(final DataIdentifier identifier) { + if (proactiveCaching + && cacheSize != 0 + && asyncDownloadCache.put(identifier, System.currentTimeMillis()) == null) { + downloadExecService.execute(new Runnable() { + @Override + public void run() { + long startTime = System.currentTimeMillis(); + InputStream input = null; + try { + LOG.trace("Async download [{}] started.", identifier); + input = getStream(identifier); + } catch (RepositoryException re) { + // ignore exception + } finally { + asyncDownloadCache.remove(identifier); + IOUtils.closeQuietly(input); + LOG.debug("Async download [{}] completed in [{}] ms.", + identifier, + (System.currentTimeMillis() - startTime)); + } + } + }); + } + } + + /** + * Returns a unique temporary file to be used for creating a new data + * record. + */ + private File newTemporaryFile() throws IOException { + return File.createTempFile(TMP, null, tmpDir); + } + + /** + * Load files from {@link LocalCache} to {@link Backend}. 
+ */ + private void uploadFilesFromCache() throws RepositoryException { + ArrayList files = new ArrayList(); + listRecursive(files, directory); + long totalSize = 0; + for (File f : files) { + totalSize += f.length(); + } + if (files.size() > 0) { + if (concurrentUploadsThreads > 1) { + new FilesUploader(files, totalSize, concurrentUploadsThreads, + false).upload(); + } else { + uploadFilesInSingleThread(files, totalSize); + } + } + } + + private void uploadFilesInSingleThread(List files, long totalSize) + throws RepositoryException { + long startTime = System.currentTimeMillis(); + LOG.info("Upload: [{}] files in single thread.", files.size()); + long currentCount = 0; + long currentSize = 0; + long time = System.currentTimeMillis(); + for (File f : files) { + String name = f.getName(); + LOG.debug("upload file [{}] ", name); + if (!name.startsWith(TMP) && !name.endsWith(DS_STORE) + && f.length() > 0) { + uploadFileToBackEnd(f, false); + } + currentSize += f.length(); + currentCount++; + long now = System.currentTimeMillis(); + if (now > time + 5000) { + LOG.info("Uploaded: [{}/{}] files, [{}/{}] size data", + new Object[] { currentCount, files.size(), currentSize, + totalSize }); + time = now; + } + } + long endTime = System.currentTimeMillis(); + LOG.info( + "Uploaded: [{}/{}] files, [{}/{}] size data, time taken = [{}] sec", + new Object[] { currentCount, files.size(), currentSize, totalSize, + ((endTime - startTime) / 1000) }); + } + + /** + * Traverse recursively and populate list with files. + */ + private static void listRecursive(List list, File file) { + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + if (f.isDirectory()) { + listRecursive(list, f); + } else { + list.add(f); + } + } + } + } + + /** + * Upload file from {@link LocalCache} to {@link Backend}. + * + * @param f + * file to uploaded. 
+ * @throws DataStoreException + */ + private void uploadFileToBackEnd(File f, boolean updateAsyncUploadCache) + throws DataStoreException { + try { + DataIdentifier identifier = new DataIdentifier(f.getName()); + usesIdentifier(identifier); + if (!backend.exists(identifier)) { + backend.write(identifier, f); + } + if (updateAsyncUploadCache) { + String fileName = getFileName(identifier); + asyncWriteCache.remove(fileName); + } + LOG.debug("uploaded [{}]", f.getName()); + } catch (IOException ioe) { + throw new DataStoreException(ioe); + } + } + + /** + * Derive file name from identifier. + */ + private static String getFileName(DataIdentifier identifier) { + String name = identifier.toString(); + return getFileName(name); + } + + private static String getFileName(String name) { + return name.substring(0, 2) + "/" + name.substring(2, 4) + "/" + + name.substring(4, 6) + "/" + name; + } + + private static DataIdentifier getIdentifier(String fileName) { + return new DataIdentifier( + fileName.substring(fileName.lastIndexOf("/") + 1)); + } + + private void usesIdentifier(DataIdentifier identifier) { + inUse.put(identifier, new WeakReference(identifier)); + } + + private static boolean mkdirs(File dir) throws IOException { + if (dir.exists()) { + if (dir.isFile()) { + throw new IOException("Can not create a directory " + + "because a file exists with the same name: " + + dir.getAbsolutePath()); + } + return false; + } + boolean created = dir.mkdirs(); + if (!created) { + throw new IOException("Could not create directory: " + + dir.getAbsolutePath()); + } + return created; + } + + @Override + public void clearInUse() { + inUse.clear(); + } + + public boolean isInUse(DataIdentifier identifier) { + return inUse.containsKey(identifier); + } + + @Override + public void close() throws DataStoreException { + cache.close(); + backend.close(); + downloadExecService.shutdown(); + } + + /** + * Setter for configuration based secret + * + * @param secret + * the secret used to sign 
reference binaries + */ + public void setSecret(String secret) { + this.secret = secret; + } + + /** + * Set the minimum object length. + * + * @param minRecordLength + * the length + */ + public void setMinRecordLength(int minRecordLength) { + this.minRecordLength = minRecordLength; + } + + /** + * Return mininum object length. + */ + @Override + public int getMinRecordLength() { + return minRecordLength; + } + + /** + * Return path of configuration properties. + * + * @return path of configuration properties. + */ + public String getConfig() { + return config; + } + + /** + * Set the configuration properties path. + * + * @param config + * path of configuration properties. + */ + public void setConfig(String config) { + this.config = config; + } + + /** + * @return size of {@link LocalCache}. + */ + public long getCacheSize() { + return cacheSize; + } + + /** + * Set size of {@link LocalCache}. + * + * @param cacheSize + * size of {@link LocalCache}. + */ + public void setCacheSize(long cacheSize) { + this.cacheSize = cacheSize; + } + + /** + * @return path of {@link LocalCache}. + */ + public String getPath() { + return path; + } + + /** + * Set path of {@link LocalCache}. + * + * @param path + * of {@link LocalCache}. + */ + public void setPath(String path) { + this.path = path; + } + + /** + * @return Purge trigger factor of {@link LocalCache}. + */ + public double getCachePurgeTrigFactor() { + return cachePurgeTrigFactor; + } + + /** + * Set purge trigger factor of {@link LocalCache}. + * + * @param cachePurgeTrigFactor + * purge trigger factor. + */ + public void setCachePurgeTrigFactor(double cachePurgeTrigFactor) { + this.cachePurgeTrigFactor = cachePurgeTrigFactor; + } + + /** + * @return Purge resize factor of {@link LocalCache}. + */ + public double getCachePurgeResizeFactor() { + return cachePurgeResizeFactor; + } + + /** + * Set purge resize factor of {@link LocalCache}. + * + * @param cachePurgeResizeFactor + * purge resize factor. 
+ */ + public void setCachePurgeResizeFactor(double cachePurgeResizeFactor) { + this.cachePurgeResizeFactor = cachePurgeResizeFactor; + } + + public int getConcurrentUploadsThreads() { + return concurrentUploadsThreads; + } + + public void setConcurrentUploadsThreads(int concurrentUploadsThreads) { + this.concurrentUploadsThreads = concurrentUploadsThreads; + } + + public int getAsyncUploadLimit() { + return asyncUploadLimit; + } + + public void setAsyncUploadLimit(int asyncUploadLimit) { + this.asyncUploadLimit = asyncUploadLimit; + } + + public boolean isContinueOnAsyncUploadFailure() { + return continueOnAsyncUploadFailure; + } + + public void setContinueOnAsyncUploadFailure( + boolean continueOnAsyncUploadFailure) { + this.continueOnAsyncUploadFailure = continueOnAsyncUploadFailure; + } + + public int getUploadRetries() { + return uploadRetries; + } + + public void setUploadRetries(int uploadRetries) { + this.uploadRetries = uploadRetries; + } + + public void setTouchAsync(boolean touchAsync) { + this.touchAsync = touchAsync; + } + + public void setProactiveCaching(boolean proactiveCaching) { + this.proactiveCaching = proactiveCaching; + } + + public void setRecLengthCacheSize(int recLengthCacheSize) { + this.recLengthCacheSize = recLengthCacheSize; + } + + public Backend getBackend() { + return backend; + } + + /** + * This class initiates files upload in multiple threads to backend. 
+ */ + private class FilesUploader { + final List files; + + final long totalSize; + + volatile AtomicInteger currentCount = new AtomicInteger(); + + volatile AtomicLong currentSize = new AtomicLong(); + + volatile AtomicBoolean exceptionRaised = new AtomicBoolean(); + + DataStoreException exception; + + final int threads; + + final boolean updateAsyncCache; + + FilesUploader(List files, long totalSize, int threads, + boolean updateAsyncCache) { + super(); + this.files = files; + this.threads = threads; + this.totalSize = totalSize; + this.updateAsyncCache = updateAsyncCache; + } + + void addCurrentCount(int delta) { + currentCount.addAndGet(delta); + } + + void addCurrentSize(long delta) { + currentSize.addAndGet(delta); + } + + synchronized void setException(DataStoreException exception) { + exceptionRaised.getAndSet(true); + this.exception = exception; + } + + boolean isExceptionRaised() { + return exceptionRaised.get(); + } + + void logProgress() { + LOG.info("Uploaded: [{}/{}] files, [{}/{}] size data", + new Object[] { currentCount, files.size(), currentSize, + totalSize }); + } + + void upload() throws DataStoreException { + long startTime = System.currentTimeMillis(); + LOG.info(" Uploading [{}] using [{}] threads.", files.size(), threads); + ExecutorService executor = Executors.newFixedThreadPool(threads, + new NamedThreadFactory("backend-file-upload-worker")); + int partitionSize = files.size() / (threads); + int startIndex = 0; + int endIndex = partitionSize; + for (int i = 1; i <= threads; i++) { + List partitionFileList = Collections.unmodifiableList(files.subList( + startIndex, endIndex)); + FileUploaderThread fut = new FileUploaderThread( + partitionFileList, startIndex, endIndex, this, + updateAsyncCache); + executor.execute(fut); + + startIndex = endIndex; + if (i == (threads - 1)) { + endIndex = files.size(); + } else { + endIndex = startIndex + partitionSize; + } + } + // This will make the executor accept no new threads + // and finish all 
existing threads in the queue + executor.shutdown(); + + try { + // Wait until all threads are finish + while (!isExceptionRaised() + && !executor.awaitTermination(15, TimeUnit.SECONDS)) { + logProgress(); + } + } catch (InterruptedException ie) { + + } + long endTime = System.currentTimeMillis(); + LOG.info( + "Uploaded: [{}/{}] files, [{}/{}] size data, time taken = [{}] sec", + new Object[] { currentCount, files.size(), currentSize, + totalSize, ((endTime - startTime) / 1000) }); + if (isExceptionRaised()) { + executor.shutdownNow(); // Cancel currently executing tasks + throw exception; + } + } + + } + + /** + * This class implements {@link Runnable} interface and uploads list of + * files from startIndex to endIndex to {@link Backend} + */ + private class FileUploaderThread implements Runnable { + final List files; + + final FilesUploader filesUploader; + + final int startIndex; + + final int endIndex; + + final boolean updateAsyncCache; + + FileUploaderThread(List files, int startIndex, int endIndex, + FilesUploader controller, boolean updateAsyncCache) { + super(); + this.files = files; + this.filesUploader = controller; + this.startIndex = startIndex; + this.endIndex = endIndex; + this.updateAsyncCache = updateAsyncCache; + } + + public void run() { + long time = System.currentTimeMillis(); + LOG.debug( + "Thread [{}] : Uploading files from startIndex [{}] to endIndex [{}] both inclusive.", + new Object[] { Thread.currentThread().getName(), startIndex, + (endIndex - 1) }); + int uploadCount = 0; + long uploadSize = 0; + try { + for (File f : files) { + + if (filesUploader.isExceptionRaised()) { + break; + } + String name = f.getName(); + LOG.debug("upload file [{}] ",name); + if (!name.startsWith(TMP) && !name.endsWith(DS_STORE) + && f.length() > 0) { + uploadFileToBackEnd(f, updateAsyncCache); + } + uploadCount++; + uploadSize += f.length(); + // update upload status at every 15 seconds. 
+ long now = System.currentTimeMillis(); + if (now > time + 15000) { + filesUploader.addCurrentCount(uploadCount); + filesUploader.addCurrentSize(uploadSize); + uploadCount = 0; + uploadSize = 0; + time = now; + } + } + // update final state. + filesUploader.addCurrentCount(uploadCount); + filesUploader.addCurrentSize(uploadSize); + } catch (DataStoreException e) { + if (!filesUploader.isExceptionRaised()) { + filesUploader.setException(e); + } + } + + } + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingFDS.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingFDS.java new file mode 100644 index 00000000000..128c326cae8 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/CachingFDS.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * {@link CachingDataStore} with {@link FSBackend}. It is performant + * {@link DataStore} when {@link FSBackend} is hosted on network storage + * (SAN or NAS). It leverages all caching capabilites of + * {@link CachingDataStore}. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.util.Properties; + +public class CachingFDS extends CachingDataStore { + private Properties properties; + + @Override + protected Backend createBackend() { + FSBackend backend = new FSBackend(); + if (properties != null) { + backend.setProperties(properties); + } + return backend; + } + + @Override + protected String getMarkerFile() { + return "fs.init.done"; + } + + /** + * Properties required to configure the S3Backend + */ + public void setProperties(Properties properties) { + this.properties = properties; + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataIdentifier.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataIdentifier.java new file mode 100644 index 00000000000..74bf140bc33 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataIdentifier.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.Serializable; + +/** + * Opaque data identifier used to identify records in a data store. 
+ * All identifiers must be serializable and implement the standard + * object equality and hash code methods. + */ +public class DataIdentifier implements Serializable { + + /** + * Serial version UID. + */ + private static final long serialVersionUID = -9197191401131100016L; + + /** + * Data identifier. + */ + private final String identifier; + + /** + * Creates a data identifier from the given string. + * + * @param identifier data identifier + */ + public DataIdentifier(String identifier) { + this.identifier = identifier; + } + + //-------------------------------------------------------------< Object > + + /** + * Returns the identifier string. + * + * @return identifier string + */ + public String toString() { + return identifier; + } + + /** + * Checks if the given object is a data identifier and has the same + * string representation as this one. + * + * @param object other object + * @return true if the given object is the same identifier, + * false otherwise + */ + public boolean equals(Object object) { + return (object instanceof DataIdentifier) + && identifier.equals(object.toString()); + } + + /** + * Returns the hash code of the identifier string. + * + * @return hash code + */ + public int hashCode() { + return identifier.hashCode(); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataRecord.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataRecord.java new file mode 100644 index 00000000000..22bff853d8f --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataRecord.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.InputStream; + +/** + * Immutable data record that consists of a binary stream. + */ +public interface DataRecord { + + /** + * Returns the identifier of this record. + * + * @return data identifier + */ + DataIdentifier getIdentifier(); + + /** + * Returns a secure reference to this binary, or {@code null} if no such + * reference is available. + * + * @return binary reference, or {@code null} + */ + String getReference(); + + /** + * Returns the length of the binary stream in this record. + * + * @return length of the binary stream + * @throws DataStoreException if the record could not be accessed + */ + long getLength() throws DataStoreException; + + /** + * Returns the the binary stream in this record. + * + * @return binary stream + * @throws DataStoreException if the record could not be accessed + */ + InputStream getStream() throws DataStoreException; + + /** + * Returns the last modified of the record. 
+ * + * @return last modified time of the binary stream + */ + long getLastModified(); +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStore.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStore.java new file mode 100644 index 00000000000..c99e13b787d --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStore.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.InputStream; +import java.util.Iterator; + +import javax.jcr.RepositoryException; + +/** + * Append-only store for binary streams. A data store consists of a number + * of identifiable data records that each contain a distinct binary stream. + * New binary streams can be added to the data store, but existing streams + * are never removed or modified. + *

+ * A data store should be fully thread-safe, i.e. it should be possible to + * add and access data records concurrently. Optimally even separate processes + * should be able to concurrently access the data store with zero interprocess + * synchronization. + */ +public interface DataStore { + + /** + * Check if a record for the given identifier exists, and return it if yes. + * If no record exists, this method returns null. + * + * @param identifier data identifier + * @return the record if found, and null if not + * @throws DataStoreException if the data store could not be accessed + */ + DataRecord getRecordIfStored(DataIdentifier identifier) + throws DataStoreException; + + /** + * Returns the identified data record. The given identifier should be + * the identifier of a previously saved data record. Since records are + * never removed, there should never be cases where the identified record + * is not found. Abnormal cases like that are treated as errors and + * handled by throwing an exception. + * + * @param identifier data identifier + * @return identified data record + * @throws DataStoreException if the data store could not be accessed, + * or if the given identifier is invalid + */ + DataRecord getRecord(DataIdentifier identifier) throws DataStoreException; + + /** + * Returns the record that matches the given binary reference. + * Returns {@code null} if the reference is invalid, for example if it + * points to a record that does not exist. + * + * @param reference binary reference + * @return matching record, or {@code null} + * @throws DataStoreException if the data store could not be accessed + */ + DataRecord getRecordFromReference(String reference) + throws DataStoreException; + + /** + * Creates a new data record. The given binary stream is consumed and + * a binary record containing the consumed stream is created and returned. + * If the same stream already exists in another record, then that record + * is returned instead of creating a new one. 
+ *

+ * The given stream is consumed and not closed by this + * method. It is the responsibility of the caller to close the stream. + * A typical call pattern would be: + *

+     *     InputStream stream = ...;
+     *     try {
+     *         record = store.addRecord(stream);
+     *     } finally {
+     *         stream.close();
+     *     }
+     * 
+ * + * @param stream binary stream + * @return data record that contains the given stream + * @throws DataStoreException if the data store could not be accessed + */ + DataRecord addRecord(InputStream stream) throws DataStoreException; + + /** + * From now on, update the modified date of an object even when accessing it. + * Usually, the modified date is only updated when creating a new object, + * or when a new link is added to an existing object. When this setting is enabled, + * even getLength() will update the modified date. + * + * @param before - update the modified date to the current time if it is older than this value + */ + void updateModifiedDateOnAccess(long before); + + /** + * Delete objects that have a modified date older than the specified date. + * + * @param min the minimum time + * @return the number of data records deleted + * @throws DataStoreException + */ + int deleteAllOlderThan(long min) throws DataStoreException; + + /** + * Get all identifiers. + * + * @return an iterator over all DataIdentifier objects + * @throws DataStoreException if the list could not be read + */ + Iterator getAllIdentifiers() throws DataStoreException; + + /** + * Initialized the data store + * + * @param homeDir the home directory of the repository + * @throws RepositoryException + */ + void init(String homeDir) throws RepositoryException; + + /** + * Get the minimum size of an object that should be stored in this data store. + * Depending on the overhead and configuration, each store may return a different value. + * + * @return the minimum size in bytes + */ + int getMinRecordLength(); + + /** + * Close the data store + * + * @throws DataStoreException if a problem occurred + */ + void close() throws DataStoreException; + + /** + * Clear the in-use list. This is only used for testing to make the the garbage collection + * think that objects are no longer in use. 
+ */ + void clearInUse(); + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStoreException.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStoreException.java new file mode 100644 index 00000000000..923fccf93b1 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/DataStoreException.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import javax.jcr.RepositoryException; + +/** + * Exception thrown by the Data Store module. + */ +public class DataStoreException extends RepositoryException { + + /** + * Constructs a new instance of this class with the specified detail + * message. + * + * @param message the detailed message. + */ + public DataStoreException(String message) { + super(message); + } + + /** + * Constructs a new instance of this class with the specified detail + * message and root cause. + * + * @param message the detailed message. + * @param cause root failure cause + */ + public DataStoreException(String message, Throwable cause) { + super(message, cause); + } + + /** + * Constructs a new instance of this class with the specified root cause. 
+ * + * @param rootCause root failure cause + */ + public DataStoreException(Throwable rootCause) { + super(rootCause); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FSBackend.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FSBackend.java new file mode 100644 index 00000000000..5cf26954b24 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FSBackend.java @@ -0,0 +1,477 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * File system {@link Backend} used with {@link CachingDataStore}. + * The file system can be network storage. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; +import java.util.Set; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class FSBackend extends AbstractBackend { + + private Properties properties; + + private String fsPath; + + File fsPathDir; + + public static final String FS_BACKEND_PATH = "fsBackendPath"; + + /** + * Logger instance. + */ + private static final Logger LOG = LoggerFactory.getLogger(FSBackend.class); + + /** + * The maximum last modified time resolution of the file system. + */ + private static final int ACCESS_TIME_RESOLUTION = 2000; + + @Override + public void init(CachingDataStore store, String homeDir, String config) + throws DataStoreException { + super.init(store, homeDir, config); + Properties initProps = null; + // Check is configuration is already provided. 
That takes precedence + // over config provided via file based config + if (this.properties != null) { + initProps = this.properties; + } else { + initProps = new Properties(); + InputStream in = null; + try { + in = new FileInputStream(config); + initProps.load(in); + } catch (IOException e) { + throw new DataStoreException( + "Could not initialize FSBackend from " + config, e); + } finally { + IOUtils.closeQuietly(in); + } + this.properties = initProps; + } + init(store, homeDir, initProps); + + } + + public void init(CachingDataStore store, String homeDir, Properties prop) + throws DataStoreException { + setDataStore(store); + setHomeDir(homeDir); + this.fsPath = prop.getProperty(FS_BACKEND_PATH); + if (this.fsPath == null || "".equals(this.fsPath)) { + throw new DataStoreException("Could not initialize FSBackend from " + + getConfig() + ". [" + FS_BACKEND_PATH + "] property not found."); + } + fsPathDir = new File(this.fsPath); + if (fsPathDir.exists() && fsPathDir.isFile()) { + throw new DataStoreException("Can not create a directory " + + "because a file exists with the same name: " + this.fsPath); + } + if (!fsPathDir.exists()) { + boolean created = fsPathDir.mkdirs(); + if (!created) { + throw new DataStoreException("Could not create directory: " + + fsPathDir.getAbsolutePath()); + } + } + } + + @Override + public InputStream read(DataIdentifier identifier) + throws DataStoreException { + File file = getFile(identifier); + try { + return new AutoClosingLazyFileInputStream(file); + } catch (IOException e) { + throw new DataStoreException("Error opening input stream of " + + file.getAbsolutePath(), e); + } + } + + @Override + public long getLength(DataIdentifier identifier) throws DataStoreException { + File file = getFile(identifier); + if (file.isFile()) { + return file.length(); + } + throw new DataStoreException("Could not length of dataIdentifier [" + + identifier + "]"); + } + + @Override + public long getLastModified(DataIdentifier identifier) + throws 
DataStoreException { + long start = System.currentTimeMillis(); + File f = getFile(identifier); + if (f.isFile()) { + return getLastModified(f); + } + LOG.info("getLastModified:Identifier [{}] not found. Took [{}] ms.", + identifier, (System.currentTimeMillis() - start)); + throw new DataStoreException("Identifier [" + identifier + + "] not found."); + } + + @Override + public void write(DataIdentifier identifier, File src) + throws DataStoreException { + File dest = getFile(identifier); + synchronized (this) { + if (dest.exists()) { + long now = System.currentTimeMillis(); + if (getLastModified(dest) < now + ACCESS_TIME_RESOLUTION) { + setLastModified(dest, now + ACCESS_TIME_RESOLUTION); + } + } else { + try { + FileUtils.copyFile(src, dest); + } catch (IOException ioe) { + LOG.error("failed to copy [{}] to [{}]", + src.getAbsolutePath(), dest.getAbsolutePath()); + throw new DataStoreException("Not able to write file [" + + identifier + "]", ioe); + } + } + } + + } + + @Override + public void writeAsync(final DataIdentifier identifier, final File src, + final AsyncUploadCallback callback) + throws DataStoreException { + if (callback == null) { + throw new IllegalArgumentException( + "callback parameter cannot be null in asyncUpload"); + } + getAsyncWriteExecutor().execute(new Runnable() { + @Override + public void run() { + try { + write(identifier, src); + callback.onSuccess(new AsyncUploadResult(identifier, src)); + } catch (DataStoreException dse) { + AsyncUploadResult res = new AsyncUploadResult(identifier, + src); + res.setException(dse); + callback.onFailure(res); + } + + } + }); + } + + @Override + public Iterator getAllIdentifiers() + throws DataStoreException { + ArrayList files = new ArrayList(); + for (File file : fsPathDir.listFiles()) { + if (file.isDirectory()) { // skip top-level files + listRecursive(files, file); + } + } + + ArrayList identifiers = new ArrayList(); + for (File f : files) { + String name = f.getName(); + identifiers.add(new 
DataIdentifier(name)); + } + LOG.debug("Found " + identifiers.size() + " identifiers."); + return identifiers.iterator(); + } + + @Override + public boolean exists(DataIdentifier identifier, boolean touch) + throws DataStoreException { + File file = getFile(identifier); + if (file.isFile()) { + if (touch) { + long now = System.currentTimeMillis(); + setLastModified(file, now + ACCESS_TIME_RESOLUTION); + } + return true; + } + return false; + } + + @Override + public boolean exists(DataIdentifier identifier) throws DataStoreException { + return exists(identifier, false); + } + + @Override + public void touch(DataIdentifier identifier, long minModifiedDate) + throws DataStoreException { + File file = getFile(identifier); + long now = System.currentTimeMillis(); + if (minModifiedDate > 0 && minModifiedDate > getLastModified(file)) { + setLastModified(file, now + ACCESS_TIME_RESOLUTION); + } + } + + @Override + public void touchAsync(final DataIdentifier identifier, + final long minModifiedDate, + final AsyncTouchCallback callback) + throws DataStoreException { + try { + if (callback == null) { + throw new IllegalArgumentException( + "callback parameter cannot be null in touchAsync"); + } + Thread.currentThread().setContextClassLoader( + getClass().getClassLoader()); + + getAsyncWriteExecutor().execute(new Runnable() { + @Override + public void run() { + try { + touch(identifier, minModifiedDate); + callback.onSuccess(new AsyncTouchResult(identifier)); + } catch (DataStoreException e) { + AsyncTouchResult result = new AsyncTouchResult( + identifier); + result.setException(e); + callback.onFailure(result); + } + } + }); + } catch (Exception e) { + if (callback != null) { + callback.onAbort(new AsyncTouchResult(identifier)); + } + throw new DataStoreException("Cannot touch the record " + + identifier.toString(), e); + } + + } + + @Override + public Set deleteAllOlderThan(long min) + throws DataStoreException { + Set deleteIdSet = new HashSet(30); + for (File file : 
fsPathDir.listFiles()) { + if (file.isDirectory()) { // skip top-level files + deleteOlderRecursive(file, min, deleteIdSet); + } + } + return deleteIdSet; + } + + @Override + public void deleteRecord(DataIdentifier identifier) + throws DataStoreException { + File file = getFile(identifier); + synchronized (this) { + if (file.exists()) { + if (file.delete()) { + deleteEmptyParentDirs(file); + } else { + LOG.warn("Failed to delete file " + file.getAbsolutePath()); + } + } + } + } + + /** + * Properties used to configure the backend. If provided explicitly before + * init is invoked then these take precedence + * @param properties to configure S3Backend + */ + public void setProperties(Properties properties) { + this.properties = properties; + } + + /** + * Returns the identified file. This method implements the pattern used to + * avoid problems with too many files in a single directory. + *

+ * No sanity checks are performed on the given identifier. + * @param identifier data identifier + * @return identified file + */ + private File getFile(DataIdentifier identifier) { + String string = identifier.toString(); + File file = this.fsPathDir; + file = new File(file, string.substring(0, 2)); + file = new File(file, string.substring(2, 4)); + file = new File(file, string.substring(4, 6)); + return new File(file, string); + } + + /** + * Set the last modified date of a file, if the file is writable. + * @param file the file + * @param time the new last modified date + * @throws DataStoreException if the file is writable but modifying the date + * fails + */ + private static void setLastModified(File file, long time) + throws DataStoreException { + if (!file.setLastModified(time)) { + if (!file.canWrite()) { + // if we can't write to the file, so garbage collection will + // also not delete it + // (read only files or file systems) + return; + } + try { + // workaround for Windows: if the file is already open for + // reading + // (in this or another process), then setting the last modified + // date + // doesn't work - see also JCR-2872 + RandomAccessFile r = new RandomAccessFile(file, "rw"); + try { + r.setLength(r.length()); + } finally { + r.close(); + } + } catch (IOException e) { + throw new DataStoreException( + "An IO Exception occurred while trying to set the last modified date: " + + file.getAbsolutePath(), e); + } + } + } + + /** + * Get the last modified date of a file. 
+ * @param file the file + * @return the last modified date + * @throws DataStoreException if reading fails + */ + private static long getLastModified(File file) throws DataStoreException { + long lastModified = file.lastModified(); + if (lastModified == 0) { + throw new DataStoreException( + "Failed to read record modified date: " + + file.getAbsolutePath()); + } + return lastModified; + } + + private void listRecursive(List list, File file) { + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + if (f.isDirectory()) { + listRecursive(list, f); + } else { + list.add(f); + } + } + } + } + + private void deleteEmptyParentDirs(File file) { + File parent = file.getParentFile(); + try { + // Only iterate & delete if parent directory of the blob file is + // child + // of the base directory and if it is empty + while (FileUtils.directoryContains(fsPathDir, parent)) { + String[] entries = parent.list(); + if (entries == null) { + LOG.warn("Failed to list directory {}", + parent.getAbsolutePath()); + break; + } + if (entries.length > 0) { + break; + } + boolean deleted = parent.delete(); + LOG.debug("Deleted parent [{}] of file [{}]: {}", new Object[] { + parent, file.getAbsolutePath(), deleted }); + parent = parent.getParentFile(); + } + } catch (IOException e) { + LOG.warn("Error in parents deletion for " + file.getAbsoluteFile(), + e); + } + } + + private void deleteOlderRecursive(File file, long min, + Set deleteIdSet) throws DataStoreException { + if (file.isFile() && file.exists() && file.canWrite()) { + synchronized (this) { + long lastModified; + try { + lastModified = getLastModified(file); + } catch (DataStoreException e) { + LOG.warn( + "Failed to read modification date; file not deleted", e); + // don't delete the file, since the lastModified date is + // uncertain + lastModified = min; + } + if (lastModified < min) { + DataIdentifier id = new DataIdentifier(file.getName()); + if (getDataStore().confirmDelete(id)) { + 
getDataStore().deleteFromCache(id); + if (LOG.isInfoEnabled()) { + LOG.info("Deleting old file " + + file.getAbsolutePath() + " modified: " + + new Timestamp(lastModified).toString() + + " length: " + file.length()); + } + if (file.delete()) { + deleteIdSet.add(id); + } else { + LOG.warn("Failed to delete old file " + + file.getAbsolutePath()); + } + } + } + } + } else if (file.isDirectory()) { + File[] list = file.listFiles(); + if (list != null) { + for (File f : list) { + deleteOlderRecursive(f, min, deleteIdSet); + } + } + + // JCR-1396: FileDataStore Garbage Collector and empty directories + // Automatic removal of empty directories (but not the root!) + synchronized (this) { + list = file.listFiles(); + if (list != null && list.length == 0) { + file.delete(); + } + } + } + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataRecord.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataRecord.java new file mode 100644 index 00000000000..fb2860706dc --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataRecord.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + + +/** + * Data record that is based on a normal file. + */ +public class FileDataRecord extends AbstractDataRecord { + + /** + * The file that contains the binary stream. + */ + private final File file; + + /** + * Creates a data record based on the given identifier and file. + * + * @param identifier data identifier + * @param file file that contains the binary stream + */ + public FileDataRecord( + AbstractDataStore store, DataIdentifier identifier, File file) { + super(store, identifier); + assert file.isFile(); + this.file = file; + } + + /** + * {@inheritDoc} + */ + public long getLength() { + return file.length(); + } + + /** + * {@inheritDoc} + */ + public InputStream getStream() throws DataStoreException { + try { + return new AutoClosingLazyFileInputStream(file); + } catch (IOException e) { + throw new DataStoreException("Error opening input stream of " + file.getAbsolutePath(), e); + } + } + + /** + * {@inheritDoc} + */ + public long getLastModified() { + return file.lastModified(); + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataStore.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataStore.java new file mode 100644 index 00000000000..7827a8b6894 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/FileDataStore.java @@ -0,0 +1,500 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.lang.ref.WeakReference; +import java.nio.file.CopyOption; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.WeakHashMap; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Simple file-based data store. Data records are stored as normal files + * named using a message digest of the contained binary stream. + * + * Configuration: + *

+ * <DataStore class="org.apache.jackrabbit.oak.spi.blob.data.FileDataStore">
+ *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ * </DataStore>
+ * 
+ *

+ * If the directory is not set, the directory <repository home>/repository/datastore is used. + *

+ * A three level directory structure is used to avoid placing too many + * files in a single directory. The chosen structure is designed to scale + * up to billions of distinct records. + *

+ * This implementation relies on the underlying file system to support + * atomic O(1) move operations with {@link Files#move(Path, Path, CopyOption...)}. + */ +public class FileDataStore extends AbstractDataStore + implements MultiDataStoreAware { + + /** + * Logger instance + */ + private static Logger log = LoggerFactory.getLogger(FileDataStore.class); + + /** + * The default value for the minimum object size. + */ + private static final int DEFAULT_MIN_RECORD_LENGTH = 100; + + /** + * The maximum last modified time resolution of the file system. + */ + private static final int ACCESS_TIME_RESOLUTION = 2000; + + /** + * Name of the directory used for temporary files. + * Must be at least 3 characters. + */ + private static final String TMP = "tmp"; + + /** + * The minimum modified date. If a file is accessed (read or write) with a modified date + * older than this value, the modified date is updated to the current time. + */ + private volatile long minModifiedDate; + + /** + * The directory that contains all the data record files. The structure + * of content within this directory is controlled by this class. + */ + private File directory; + + /** + * The name of the directory that contains all the data record files. The structure + * of content within this directory is controlled by this class. + */ + private String path; + + /** + * The minimum size of an object that should be stored in this data store. + */ + private int minRecordLength = DEFAULT_MIN_RECORD_LENGTH; + + /** + * All data identifiers that are currently in use are in this set until they are garbage collected. + */ + protected Map> inUse = + Collections.synchronizedMap(new WeakHashMap>()); + + /** + * Initialized the data store. + * If the path is not set, <repository home>/repository/datastore is used. + * This directory is automatically created if it does not yet exist. 
+ * + * @param homeDir + */ + public void init(String homeDir) { + if (path == null) { + path = homeDir + "/repository/datastore"; + } + directory = new File(path); + directory.mkdirs(); + } + + /** + * Get a data record for the given identifier. + * + * @param identifier the identifier + * @return the data record or null + */ + public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { + File file = getFile(identifier); + if (!file.exists()) { + return null; + } + if (minModifiedDate != 0) { + // only check when running garbage collection + synchronized (this) { + if (getLastModified(file) < minModifiedDate) { + setLastModified(file, System.currentTimeMillis() + ACCESS_TIME_RESOLUTION); + } + } + } + usesIdentifier(identifier); + return new FileDataRecord(this, identifier, file); + } + + private void usesIdentifier(DataIdentifier identifier) { + inUse.put(identifier, new WeakReference(identifier)); + } + + /** + * Creates a new data record. + * The stream is first consumed and the contents are saved in a temporary file + * and the {@link #DIGEST} message digest of the stream is calculated. If a + * record with the same {@link #DIGEST} digest (and length) is found then it is + * returned. Otherwise the temporary file is moved in place to become + * the new data record that gets returned. 
+ * + * @param input binary stream + * @return data record that contains the given stream + * @throws DataStoreException if the record could not be created + */ + public DataRecord addRecord(InputStream input) throws DataStoreException { + File temporary = null; + try { + temporary = newTemporaryFile(); + DataIdentifier tempId = new DataIdentifier(temporary.getName()); + usesIdentifier(tempId); + // Copy the stream to the temporary file and calculate the + // stream length and the message digest of the stream + long length = 0; + MessageDigest digest = MessageDigest.getInstance(DIGEST); + OutputStream output = new DigestOutputStream( + new FileOutputStream(temporary), digest); + try { + length = IOUtils.copyLarge(input, output); + } finally { + output.close(); + } + DataIdentifier identifier = + new DataIdentifier(encodeHexString(digest.digest())); + File file; + + synchronized (this) { + // Check if the same record already exists, or + // move the temporary file in place if needed + usesIdentifier(identifier); + file = getFile(identifier); + if (!file.exists()) { + File parent = file.getParentFile(); + parent.mkdirs(); + Files.move(temporary.toPath(), file.toPath(), StandardCopyOption.ATOMIC_MOVE); + // no longer need to delete the temporary file + temporary = null; + } else { + long now = System.currentTimeMillis(); + if (getLastModified(file) < now + ACCESS_TIME_RESOLUTION) { + setLastModified(file, now + ACCESS_TIME_RESOLUTION); + } + } + if (file.length() != length) { + // Sanity checks on the record file. These should never fail, + // but better safe than sorry... 
+ if (!file.isFile()) { + throw new IOException("Not a file: " + file); + } + throw new IOException(DIGEST + " collision: " + file); + } + } + // this will also make sure that + // tempId is not garbage collected until here + inUse.remove(tempId); + return new FileDataRecord(this, identifier, file); + } catch (NoSuchAlgorithmException e) { + throw new DataStoreException(DIGEST + " not available", e); + } catch (IOException e) { + throw new DataStoreException("Could not add record", e); + } finally { + if (temporary != null) { + temporary.delete(); + } + } + } + + /** + * Returns the identified file. This method implements the pattern + * used to avoid problems with too many files in a single directory. + *

+ * No sanity checks are performed on the given identifier. + * + * @param identifier data identifier + * @return identified file + */ + private File getFile(DataIdentifier identifier) { + usesIdentifier(identifier); + String string = identifier.toString(); + File file = directory; + file = new File(file, string.substring(0, 2)); + file = new File(file, string.substring(2, 4)); + file = new File(file, string.substring(4, 6)); + return new File(file, string); + } + + /** + * Returns a unique temporary file to be used for creating a new + * data record. + * + * @return temporary file + * @throws IOException + */ + private File newTemporaryFile() throws IOException { + // the directory is already created in the init method + return File.createTempFile(TMP, null, directory); + } + + public void updateModifiedDateOnAccess(long before) { + minModifiedDate = before; + } + + public void deleteRecord(DataIdentifier identifier) + throws DataStoreException { + File file = getFile(identifier); + synchronized (this) { + if (file.exists()) { + if (file.delete()) { + deleteEmptyParentDirs(file); + } else { + log.warn("Failed to delete file " + file.getAbsolutePath()); + } + } + } + } + + private void deleteEmptyParentDirs(File file) { + File parent = file.getParentFile(); + try { + // Only iterate & delete if parent directory of the blob file is child + // of the base directory and if it is empty + while (FileUtils.directoryContains(directory, parent)) { + String[] entries = parent.list(); + if (entries == null) { + log.warn("Failed to list directory {}", parent.getAbsolutePath()); + break; + } + if (entries.length > 0) { + break; + } + boolean deleted = parent.delete(); + log.debug("Deleted parent [{}] of file [{}]: {}", + new Object[]{parent, file.getAbsolutePath(), deleted}); + parent = parent.getParentFile(); + } + } catch (IOException e) { + log.warn("Error in parents deletion for " + file.getAbsoluteFile(), e); + } + } + + public int deleteAllOlderThan(long min) { + int 
count = 0; + for (File file : directory.listFiles()) { + if (file.isDirectory()) { // skip top-level files + count += deleteOlderRecursive(file, min); + } + } + return count; + } + + private int deleteOlderRecursive(File file, long min) { + int count = 0; + if (file.isFile() && file.exists() && file.canWrite()) { + synchronized (this) { + long lastModified; + try { + lastModified = getLastModified(file); + } catch (DataStoreException e) { + log.warn("Failed to read modification date; file not deleted", e); + // don't delete the file, since the lastModified date is uncertain + lastModified = min; + } + if (lastModified < min) { + DataIdentifier id = new DataIdentifier(file.getName()); + if (!inUse.containsKey(id)) { + if (log.isInfoEnabled()) { + log.info("Deleting old file " + file.getAbsolutePath() + + " modified: " + new Timestamp(lastModified).toString() + + " length: " + file.length()); + } + if (!file.delete()) { + log.warn("Failed to delete old file " + file.getAbsolutePath()); + } + count++; + } + } + } + } else if (file.isDirectory()) { + File[] list = file.listFiles(); + if (list != null) { + for (File f: list) { + count += deleteOlderRecursive(f, min); + } + } + + // JCR-1396: FileDataStore Garbage Collector and empty directories + // Automatic removal of empty directories (but not the root!) 
+ synchronized (this) { + list = file.listFiles(); + if (list != null && list.length == 0) { + file.delete(); + } + } + } + return count; + } + + private void listRecursive(List list, File file) { + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + if (f.isDirectory()) { + listRecursive(list, f); + } else { + list.add(f); + } + } + } + } + + public Iterator getAllIdentifiers() { + ArrayList files = new ArrayList(); + for (File file : directory.listFiles()) { + if (file.isDirectory()) { // skip top-level files + listRecursive(files, file); + } + } + + ArrayList identifiers = new ArrayList(); + for (File f: files) { + String name = f.getName(); + identifiers.add(new DataIdentifier(name)); + } + log.debug("Found " + identifiers.size() + " identifiers."); + return identifiers.iterator(); + } + + public void clearInUse() { + inUse.clear(); + } + + /** + * Get the name of the directory where this data store keeps the files. + * + * @return the full path name + */ + public String getPath() { + return path; + } + + /** + * Set the name of the directory where this data store keeps the files. + * + * @param directoryName the path name + */ + public void setPath(String directoryName) { + this.path = directoryName; + } + + public int getMinRecordLength() { + return minRecordLength; + } + + /** + * Set the minimum object length. 
+ * + * @param minRecordLength the length + */ + public void setMinRecordLength(int minRecordLength) { + this.minRecordLength = minRecordLength; + } + + public void close() { + // nothing to do + } + + //---------------------------------------------------------< protected >-- + + @Override + protected byte[] getOrCreateReferenceKey() throws DataStoreException { + File file = new File(directory, "reference.key"); + try { + if (file.exists()) { + return FileUtils.readFileToByteArray(file); + } else { + byte[] key = super.getOrCreateReferenceKey(); + FileUtils.writeByteArrayToFile(file, key); + return key; + } + } catch (IOException e) { + throw new DataStoreException( + "Unable to access reference key file " + file.getPath(), e); + } + } + + //-----------------------------------------------------------< private >-- + + /** + * Get the last modified date of a file. + * + * @param file the file + * @return the last modified date + * @throws DataStoreException if reading fails + */ + private static long getLastModified(File file) throws DataStoreException { + long lastModified = file.lastModified(); + if (lastModified == 0) { + throw new DataStoreException("Failed to read record modified date: " + file.getAbsolutePath()); + } + return lastModified; + } + + /** + * Set the last modified date of a file, if the file is writable. 
+ * + * @param file the file + * @param time the new last modified date + * @throws DataStoreException if the file is writable but modifying the date fails + */ + private static void setLastModified(File file, long time) throws DataStoreException { + if (!file.setLastModified(time)) { + if (!file.canWrite()) { + // if we can't write to the file, so garbage collection will also not delete it + // (read only files or file systems) + return; + } + try { + // workaround for Windows: if the file is already open for reading + // (in this or another process), then setting the last modified date + // doesn't work - see also JCR-2872 + RandomAccessFile r = new RandomAccessFile(file, "rw"); + try { + r.setLength(r.length()); + } finally { + r.close(); + } + } catch (IOException e) { + throw new DataStoreException("An IO Exception occurred while trying to set the last modified date: " + file.getAbsolutePath(), e); + } + } + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/LocalCache.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/LocalCache.java new file mode 100644 index 00000000000..d80762510d7 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/LocalCache.java @@ -0,0 +1,668 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.util.TransientFileFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class implements a LRU cache used by {@link CachingDataStore}. If cache + * size exceeds limit, this cache goes in purge mode. In purge mode any + * operation to cache is no-op. After purge cache size would be less than + * cachePurgeResizeFactor * maximum size. + */ +public class LocalCache { + + /** + * Logger instance. + */ + static final Logger LOG = LoggerFactory.getLogger(LocalCache.class); + + /** + * The file names of the files that need to be deleted. + */ + final Set toBeDeleted = new HashSet(); + + /** + * The filename Vs file size LRU cache. + */ + LRUCache cache; + + /** + * The directory where the files are created. + */ + private final File directory; + + /** + * The directory where tmp files are created. + */ + private final File tmp; + + /** + * If true cache is in purgeMode and not available. All operation would be + * no-op. + */ + private volatile boolean purgeMode; + + private AsyncUploadCache asyncUploadCache; + + private AtomicLong cacheMissCounter = new AtomicLong(); + + private AtomicLong cacheMissDuration = new AtomicLong(); + + + /** + * Build LRU cache of files located at 'path'. It uses lastModified property + * of file to build LRU cache. 
If cache size exceeds limit size, this cache + * goes in purge mode. In purge mode any operation to cache is no-op. + * + * @param path file system path + * @param tmpPath temporary directory used by cache. + * @param maxSizeInBytes maximum size of cache. + * @param cachePurgeTrigFactor factor which triggers cache to purge mode. + * That is if current size exceed (cachePurgeTrigFactor * maxSizeInBytes), the + * cache will go in auto-purge mode. + * @param cachePurgeResizeFactor after cache purge size of cache will be + * just less (cachePurgeResizeFactor * maxSizeInBytes). + * @param asyncUploadCache {@link AsyncUploadCache} + */ + public LocalCache(String path, String tmpPath, long maxSizeInBytes, double cachePurgeTrigFactor, + double cachePurgeResizeFactor, AsyncUploadCache asyncUploadCache) { + directory = new File(path); + tmp = new File(tmpPath); + LOG.info( + "cachePurgeTrigFactor =[{}], cachePurgeResizeFactor =[{}], " + + "cachePurgeTrigFactorSize =[{}], cachePurgeResizeFactorSize =[{}]", + new Object[] { cachePurgeTrigFactor, cachePurgeResizeFactor, + (cachePurgeTrigFactor * maxSizeInBytes), + (cachePurgeResizeFactor * maxSizeInBytes) }); + cache = new LRUCache(maxSizeInBytes, cachePurgeTrigFactor, cachePurgeResizeFactor); + this.asyncUploadCache = asyncUploadCache; + new Thread(new CacheBuildJob()).start(); + } + + /** + * Store an item in the cache and return the input stream. If cache is in + * purgeMode or file doesn't exists, inputstream from a + * {@link TransientFileFactory#createTransientFile(String, String, File)} is + * returned. Otherwise inputStream from cached file is returned. This method + * doesn't close the incoming inputstream. + * + * @param fileName the key of cache. + * @param in {@link InputStream} + * @return the (new) input stream. 
+ */ + public InputStream store(String fileName, final InputStream in) + throws IOException { + fileName = fileName.replace("\\", "/"); + File f = getFile(fileName); + long length = 0; + if (!f.exists() || isInPurgeMode()) { + OutputStream out = null; + File transFile = null; + try { + TransientFileFactory tff = TransientFileFactory.getInstance(); + transFile = tff.createTransientFile("s3-", "tmp", tmp); + out = new BufferedOutputStream(new FileOutputStream(transFile)); + length = IOUtils.copyLarge(in, out); + } finally { + IOUtils.closeQuietly(out); + } + // rename the file to local fs cache + if (canAdmitFile(length) + && (f.getParentFile().exists() || f.getParentFile().mkdirs()) + && transFile.renameTo(f) && f.exists()) { + if (transFile.exists() && transFile.delete()) { + LOG.info("tmp file [{}] not deleted successfully", + transFile.getAbsolutePath()); + } + transFile = null; + LOG.debug( + "file [{}] doesn't exists. adding to local cache using inputstream.", + fileName); + cache.put(fileName, f.length()); + } else { + LOG.debug( + "file [{}] doesn't exists. returning transient file [{}].", + fileName, transFile.getAbsolutePath()); + f = transFile; + } + } else { + if (in instanceof BackendResourceAbortable) { + ((BackendResourceAbortable) in).abort(); + } + f.setLastModified(System.currentTimeMillis()); + LOG.debug( + "file [{}] exists. adding to local cache using inputstream.", + fileName); + cache.put(fileName, f.length()); + } + tryPurge(); + return new AutoClosingLazyFileInputStream(f); + } + + /** + * Store an item along with file in cache. Cache size is increased by + * {@link File#length()} If file already exists in cache, + * {@link File#setLastModified(long)} is updated with current time. + * + * @param fileName the key of cache. + * @param src file to be added to cache. 
+ */ + public File store(String fileName, final File src) { + try { + return store(fileName, src, false).getFile(); + } catch (IOException ioe) { + LOG.warn("Exception in addding file [" + fileName + "] to local cache.", ioe); + } + return null; + } + + /** + * This method add file to {@link LocalCache} and tries that file can be + * added to {@link AsyncUploadCache}. If file is added to + * {@link AsyncUploadCache} successfully, it sets + * {@link AsyncUploadCacheResult#setAsyncUpload(boolean)} to true. + * + * @param fileName name of the file. + * @param src source file. + * @param tryForAsyncUpload If true it tries to add fileName to + * {@link AsyncUploadCache} + * @return {@link AsyncUploadCacheResult}. This method sets + * {@link AsyncUploadCacheResult#setAsyncUpload(boolean)} to true, if + * fileName is added to {@link AsyncUploadCache} successfully else + * it sets {@link AsyncUploadCacheResult#setAsyncUpload(boolean)} to + * false. {@link AsyncUploadCacheResult#getFile()} contains cached + * file, if it is added to {@link LocalCache} or original file. 
+ * @throws IOException + */ + public AsyncUploadCacheResult store(String fileName, File src, + boolean tryForAsyncUpload) throws IOException { + fileName = fileName.replace("\\", "/"); + File dest = getFile(fileName); + File parent = dest.getParentFile(); + AsyncUploadCacheResult result = new AsyncUploadCacheResult(); + result.setFile(src); + result.setAsyncUpload(false); + boolean destExists = false; + if ((destExists = dest.exists()) + || (src.exists() && !dest.exists() && !src.equals(dest) + && canAdmitFile(src.length()) + && (parent.exists() || parent.mkdirs()) && (src.renameTo(dest)))) { + if (destExists) { + dest.setLastModified(System.currentTimeMillis()); + } + LOG.debug("file [{}] moved to [{}] ", src.getAbsolutePath(), dest.getAbsolutePath()); + LOG.debug( + "file [{}] exists= [{}] added to local cache, isLastModified [{}]", + new Object[] { dest.getAbsolutePath(), dest.exists(), + destExists }); + + cache.put(fileName, dest.length()); + result.setFile(dest); + if (tryForAsyncUpload) { + result.setAsyncUpload(asyncUploadCache.add(fileName).canAsyncUpload()); + } + } else { + LOG.info("file [{}] exists= [{}] not added to local cache.", + fileName, destExists); + } + tryPurge(); + return result; + } + /** + * Return the inputstream from cache, or null if not in the cache. + * + * @param fileName name of file. + * @return stream or null. + */ + public InputStream getIfStored(String fileName) throws IOException { + File file = getFileIfStored(fileName); + return file == null ? 
null : new AutoClosingLazyFileInputStream(file); + } + + public File getFileIfStored(String fileName) throws IOException { + fileName = fileName.replace("\\", "/"); + File f = getFile(fileName); + long diff = (System.currentTimeMillis() - cacheMissDuration.get()) / 1000; + // logged at 5 minute interval minimum + if (diff > 5 * 60) { + LOG.info("local cache misses [{}] in [{}] sec", new Object[] { + cacheMissCounter.getAndSet(0), diff }); + cacheMissDuration.set(System.currentTimeMillis()); + } + + // return file in purge mode = true and file present in asyncUploadCache + // as asyncUploadCache's files will be not be deleted in cache purge. + if (!f.exists() || (isInPurgeMode() && !asyncUploadCache.hasEntry(fileName, false))) { + LOG.debug( + "getFileIfStored returned: purgeMode=[{}], file=[{}] exists=[{}]", + new Object[] { isInPurgeMode(), f.getAbsolutePath(), f.exists() }); + cacheMissCounter.incrementAndGet(); + return null; + } else { + // touch entry in LRU caches + f.setLastModified(System.currentTimeMillis()); + cache.get(fileName); + return f; + } + } + + /** + * Delete file from cache. Size of cache is reduced by file length. The + * method is no-op if file doesn't exist in cache. + * + * @param fileName file name that need to be removed from cache. + */ + public void delete(String fileName) { + if (isInPurgeMode()) { + LOG.debug("purgeMode true :delete returned"); + return; + } + fileName = fileName.replace("\\", "/"); + cache.remove(fileName); + } + + /** + * Returns length of file if exists in cache else returns null. + * @param fileName name of the file. + */ + public Long getFileLength(String fileName) { + Long length = null; + try { + length = cache.get(fileName); + if( length == null ) { + File f = getFileIfStored(fileName); + if (f != null) { + length = f.length(); + } + } + } catch (IOException ignore) { + + } + return length; + } + + /** + * Close the cache. Cache maintain set of files which it was not able to + * delete successfully. 
This method will make an attempt to delete all + unsuccessful delete files. + */ + public void close() { + LOG.debug("close"); + deleteOldFiles(); + } + + /** + * Check if cache can admit file of given length. + * @param length of the file. + * @return true if yes else return false. + */ + private boolean canAdmitFile(final long length) { + //order is important here + boolean value = !isInPurgeMode() && (cache.canAdmitFile(length)); + if (!value) { + LOG.debug("cannot admit file of length=[{}] and currentSizeInBytes=[{}] ", + length, cache.currentSizeInBytes); + } + return value; + } + + /** + * Return true if cache is in purge mode else return false. + */ + synchronized boolean isInPurgeMode() { + return purgeMode; + } + + /** + * Set purge mode. If set to true all cache operation will be no-op. If set + * to false, all operations to cache are available. + * + * @param purgeMode purge mode + */ + synchronized void setPurgeMode(final boolean purgeMode) { + this.purgeMode = purgeMode; + } + + File getFile(final String fileName) { + return new File(directory, fileName); + } + + private void deleteOldFiles() { + int initialSize = toBeDeleted.size(); + int count = 0; + for (String fileName : new ArrayList(toBeDeleted)) { + fileName = fileName.replace("\\", "/"); + if( cache.remove(fileName) != null) { + count++; + } + } + LOG.info("deleted [{}]/[{}] files.", count, initialSize); + } + + /** + * This method tries to delete a file. If it is not able to delete file due + * to any reason, it adds it to the toBeDeleted list. + * + * @param fileName name of the file which will be deleted. + * @return true if this method deletes file successfully else return false. 
+ */ + boolean tryDelete(final String fileName) { + LOG.debug("try deleting file [{}]", fileName); + File f = getFile(fileName); + if (f.exists() && f.delete()) { + LOG.info("File [{}] deleted successfully", f.getAbsolutePath()); + toBeDeleted.remove(fileName); + while (true) { + f = f.getParentFile(); + if (f.equals(directory) || f.list().length > 0) { + break; + } + // delete empty parent folders (except the main directory) + f.delete(); + } + return true; + } else if (f.exists()) { + LOG.info("not able to delete file [{}]", f.getAbsolutePath()); + toBeDeleted.add(fileName); + return false; + } + return true; + } + + static int maxSizeElements(final long bytes) { + // after a CQ installation, the average item in + // the data store is about 52 KB + int count = (int) (bytes / 65535); + count = Math.max(1024, count); + count = Math.min(64 * 1024, count); + return count; + } + + /** + * This method tries purging of local cache. It checks if local cache + * has exceeded the defined limit then it triggers purge cache job in a + * seperate thread. + */ + synchronized void tryPurge() { + if (!isInPurgeMode() + && cache.currentSizeInBytes > cache.cachePurgeTrigSize) { + setPurgeMode(true); + LOG.info( + "cache.entries = [{}], currentSizeInBytes=[{}] exceeds cachePurgeTrigSize=[{}]", + new Object[] { cache.size(), cache.currentSizeInBytes, + cache.cachePurgeTrigSize }); + new Thread(new PurgeJob()).start(); + } else { + LOG.debug( + "currentSizeInBytes=[{}],cachePurgeTrigSize=[{}], isInPurgeMode =[{}]", + new Object[] { cache.currentSizeInBytes, + cache.cachePurgeTrigSize, isInPurgeMode() }); + } + } + + /** + * A LRU based extension {@link LinkedHashMap}. The key is file name and + * value is length of file. 
+ */ + private class LRUCache extends LinkedHashMap { + private static final long serialVersionUID = 1L; + + volatile long currentSizeInBytes; + + final long maxSizeInBytes; + + final long cachePurgeResize; + + final long cachePurgeTrigSize; + + LRUCache(final long maxSizeInBytes, + final double cachePurgeTrigFactor, + final double cachePurgeResizeFactor) { + super(maxSizeElements(maxSizeInBytes), (float) 0.75, true); + this.maxSizeInBytes = maxSizeInBytes; + this.cachePurgeTrigSize = new Double(cachePurgeTrigFactor + * maxSizeInBytes).longValue(); + this.cachePurgeResize = new Double(cachePurgeResizeFactor + * maxSizeInBytes).longValue(); + } + + /** + * Overridden {@link Map#remove(Object)} to delete corresponding file + * from file system. + */ + @Override + public synchronized Long remove(final Object key) { + String fileName = (String) key; + fileName = fileName.replace("\\", "/"); + try { + // not removing file from local cache, if there is in progress + // async upload on it. + if (asyncUploadCache.hasEntry(fileName, false)) { + LOG.info( + "AsyncUploadCache upload contains file [{}]. Not removing it from LocalCache.", + fileName); + return null; + } + } catch (IOException e) { + LOG.debug("error: ", e); + return null; + } + Long flength = null; + if (tryDelete(fileName)) { + flength = super.remove(key); + if (flength != null) { + LOG.debug("cache entry [{}], with size [{}] removed.", + fileName, flength); + currentSizeInBytes -= flength.longValue(); + } + } else if (!getFile(fileName).exists()) { + // second attempt. remove from cache if file doesn't exists + flength = super.remove(key); + if (flength != null) { + LOG.debug( + "file not exists. 
cache entry [{}], with size [{}] removed.", + fileName, flength); + currentSizeInBytes -= flength.longValue(); + } + } else { + LOG.info("not able to remove cache entry [{}], size [{}]", key, + super.get(key)); + } + return flength; + } + + @Override + public Long put(final String fileName, final Long value) { + if( isInPurgeMode()) { + LOG.debug("cache is purge mode: put is no-op"); + return null; + } + synchronized (this) { + Long oldValue = cache.get(fileName); + if (oldValue == null) { + long flength = value.longValue(); + currentSizeInBytes += flength; + return super.put(fileName.replace("\\", "/"), value); + } + toBeDeleted.remove(fileName); + return oldValue; + } + } + + @Override + public Long get(Object key) { + if( isInPurgeMode()) { + LOG.debug("cache is purge mode: get is no-op"); + return null; + } + synchronized (this) { + return super.get(key); + } + } + + /** + * This method check if cache can admit file of given length. + * @param length length of file. + * @return true if cache size + length is less than maxSize. + */ + synchronized boolean canAdmitFile(final long length) { + return cache.currentSizeInBytes + length < cache.maxSizeInBytes; + } + } + + /** + * This class performs purging of local cache. It implements + * {@link Runnable} and should be invoked in a separate thread. + */ + private class PurgeJob implements Runnable { + public PurgeJob() { + // TODO Auto-generated constructor stub + } + + /** + * This method purges local cache till its size is less than + * cacheResizefactor * maxSize + */ + @Override + public void run() { + try { + synchronized (cache) { + // first try to delete toBeDeleted files + int initialSize = cache.size(); + LOG.info(" cache purge job started. 
initial cache entries = [{}]", initialSize); + for (String fileName : new ArrayList(toBeDeleted)) { + cache.remove(fileName); + } + int skipCount = 0; + Iterator> itr = cache.entrySet().iterator(); + while (itr.hasNext()) { + Map.Entry entry = itr.next(); + if (entry.getKey() != null) { + if (cache.currentSizeInBytes > cache.cachePurgeResize) { + if (cache.remove(entry.getKey()) != null) { + itr = cache.entrySet().iterator(); + for (int i = 0; i < skipCount && itr.hasNext(); i++) { + itr.next(); + } + } else { + skipCount++; + } + } else { + break; + } + } + } + LOG.info( + " cache purge job completed: cleaned [{}] files and currentSizeInBytes = [{}]", + (initialSize - cache.size()), cache.currentSizeInBytes); + } + } catch (Exception e) { + LOG.error("error in purge jobs:", e); + } finally { + setPurgeMode(false); + } + } + } + + /** + * This class implements {@link Runnable} interface to build LRU cache + * asynchronously. + */ + private class CacheBuildJob implements Runnable { + + + public void run() { + long startTime = System.currentTimeMillis(); + ArrayList allFiles = new ArrayList(); + Iterator it = FileUtils.iterateFiles(directory, null, true); + while (it.hasNext()) { + File f = it.next(); + allFiles.add(f); + } + long t1 = System.currentTimeMillis(); + LOG.debug("Time taken to recursive [{}] took [{}] sec", + allFiles.size(), ((t1 - startTime) / 1000)); + + String dataStorePath = directory.getAbsolutePath(); + // convert to java path format + dataStorePath = dataStorePath.replace("\\", "/"); + LOG.info("directoryPath = " + dataStorePath); + + String tmpPath = tmp.getAbsolutePath(); + tmpPath = tmpPath.replace("\\", "/"); + LOG.debug("tmp path [{}]", tmpPath); + long time = System.currentTimeMillis(); + int count = 0; + for (File f : allFiles) { + if (f.exists()) { + count++; + String name = f.getPath(); + String filePath = f.getAbsolutePath(); + // convert to java path format + name = name.replace("\\", "/"); + filePath = filePath.replace("\\", "/"); + 
// skipped any temp file + if(filePath.startsWith(tmpPath) ) { + LOG.info ("tmp file [{}] skipped ", filePath); + continue; + } + if (filePath.startsWith(dataStorePath)) { + name = filePath.substring(dataStorePath.length()); + } + if (name.startsWith("/") || name.startsWith("\\")) { + name = name.substring(1); + } + store(name, f); + long now = System.currentTimeMillis(); + if (now > time + 10000) { + LOG.info("Processed {" + (count) + "}/{" + allFiles.size() + "}"); + time = now; + } + } + } + LOG.debug( + "Processed [{}]/[{}], currentSizeInBytes = [{}], maxSizeInBytes = [{}], cache.filecount = [{}]", + new Object[] { count, allFiles.size(), + cache.currentSizeInBytes, cache.maxSizeInBytes, + cache.size() }); + long t3 = System.currentTimeMillis(); + LOG.info("Time to build cache of [{}] files took [{}] sec", + allFiles.size(), ((t3 - startTime) / 1000)); + } + } +} + diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStore.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStore.java new file mode 100644 index 00000000000..902a4bd4497 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStore.java @@ -0,0 +1,722 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Iterator; +import java.util.concurrent.locks.ReentrantLock; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.oak.spi.blob.fs.FileSystem; +import org.apache.jackrabbit.oak.spi.blob.fs.FileSystemException; +import org.apache.jackrabbit.oak.spi.blob.fs.FileSystemResource; +import org.apache.jackrabbit.oak.spi.blob.fs.local.LocalFileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A MultiDataStore can handle two independent DataStores. + *

+ * Attention: You will lose the global single instance mechanism! + *

+ * It can be used if you have two storage systems. One for fast access and + * another one like an archive DataStore on a slower storage system. All Files will + * be added to the primary DataStore. On read operations first the primary + * dataStore will be used and if no Record is found the archive DataStore will + * be used. The GarbageCollector will only remove files from the archive + * DataStore. + *

+ * The internal MoveDataTask will be started automatically and could be + * configured with the following properties. + *

+ * The Configuration: + * + *

+ * <DataStore class="org.apache.jackrabbit.oak.spi.blob.data.MultiDataStore">
+ *     <param name="{@link #setMaxAge(int) maxAge}" value="60"/>
+ *     <param name="{@link #setMoveDataTaskSleep(int) moveDataTaskSleep}" value="604800"/>
+ *     <param name="{@link #setMoveDataTaskFirstRunHourOfDay(int) moveDataTaskFirstRunHourOfDay}" value="1"/>
+ *     <param name="{@link #setSleepBetweenRecords(long) sleepBetweenRecords}" value="100"/>
+ *     <param name="{@link #setDelayedDelete(boolean) delayedDelete}" value="false"/>
+ *     <param name="{@link #setDelayedDeleteSleep(long) delayedDeleteSleep}" value="86400"/>
+ *     <param name="primary" value="org.apache.jackrabbit.core.data.db.DbDataStore">
+ *        <param .../>
+ *     </param>
+ *     <param name="archive" value="org.apache.jackrabbit.oak.spi.blob.data.FileDataStore">
+ *        <param .../>
+ *     </param>
+ * </DataStore>
+ * 
+ * + *
    + *
  • maxAge: defines how many days the content will reside in the + * primary data store. DataRecords that have been added before this time span + * will be moved to the archive data store. (default = 60)
  • + *
  • moveDataTaskSleep: specifies the sleep time of the + * moveDataTaskThread in seconds. (default = 60 * 60 * 24 * 7, which equals 7 + * days)
  • + *
  • moveDataTaskNextRunHourOfDay: specifies the hour at which + * the moveDataTaskThread initiates its first run (default = 1 + * which means 01:00 at night)
  • + *
  • sleepBetweenRecords: specifies the delay in milliseconds + * between scanning data records (default = 100)
  • + *
  • delayedDelete: it's possible to delay the delete operation on + * the primary data store. The DataIdentifiers will be written to a temporary + * file. The file will be processed after a defined sleep (see + * delayedDeleteSleep). It's useful if you like to create a snapshot + * of the primary data store backend in the meantime before the data will be + * deleted. (default = false)
  • + *
  • delayedDeleteSleep: specifies the sleep time of the + * delayedDeleteTaskThread in seconds. (default = 60 * 60 * 24, which equals 1 + * day). This means the delayed delete from the primary data store will be + * processed after one day.
  • + *
+ */ +public class MultiDataStore implements DataStore { + + /** + * Logger instance + */ + private static Logger log = LoggerFactory.getLogger(MultiDataStore.class); + + private DataStore primaryDataStore; + private DataStore archiveDataStore; + + /** + * Max Age in days. + */ + private int maxAge = 60; + + /** + * ReentrantLock that is used while the MoveDataTask is running. + */ + private ReentrantLock moveDataTaskLock = new ReentrantLock(); + private boolean moveDataTaskRunning = false; + private Thread moveDataTaskThread; + + /** + * The sleep time in seconds of the MoveDataTask, 7 day default. + */ + private int moveDataTaskSleep = 60 * 60 * 24 * 7; + + /** + * Indicates when the next run of the move task is scheduled. The first run + * is scheduled by default at 01:00 hours. + */ + private Calendar moveDataTaskNextRun = Calendar.getInstance(); + + /** + * Its possible to delay the delete operation on the primary data store + * while move task is running. The delete will be executed after defined + * delayDeleteSleep. + */ + private boolean delayedDelete = false; + + /** + * The sleep time in seconds to delay remove operation on the primary data + * store, 1 day default. + */ + private long delayedDeleteSleep = 60 * 60 * 24; + + /** + * File that holds the data identifiers if delayDelete is enabled. + */ + private FileSystemResource identifiersToDeleteFile = null; + + private Thread deleteDelayedIdentifiersTaskThread; + + /** + * Name of the file which holds the identifiers if deleayed delete is + * enabled + */ + private final String IDENTIFIERS_TO_DELETE_FILE_KEY = "identifiersToDelete"; + + /** + * The delay time in milliseconds between scanning data records, 100 + * default. 
+ */ + private long sleepBetweenRecords = 100; + + { + if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= 1) { + moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); + } + moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, 1); + moveDataTaskNextRun.set(Calendar.MINUTE, 0); + moveDataTaskNextRun.set(Calendar.SECOND, 0); + moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); + } + + /** + * Setter for the primary dataStore + * + * @param dataStore + */ + public void setPrimaryDataStore(DataStore dataStore) { + this.primaryDataStore = dataStore; + } + + /** + * Setter for the archive dataStore + * + * @param dataStore + */ + public void setArchiveDataStore(DataStore dataStore) { + this.archiveDataStore = dataStore; + } + + /** + * Check if a record for the given identifier exists in the primary data + * store. If not found there it will be returned from the archive data + * store. If no record exists, this method returns null. + * + * @param identifier + * data identifier + * @return the record if found, and null if not + */ + public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { + if (moveDataTaskRunning) { + moveDataTaskLock.lock(); + } + try { + DataRecord dataRecord = primaryDataStore.getRecordIfStored(identifier); + if (dataRecord == null) { + dataRecord = archiveDataStore.getRecordIfStored(identifier); + } + return dataRecord; + } finally { + if (moveDataTaskRunning) { + moveDataTaskLock.unlock(); + } + } + } + + /** + * Returns the identified data record from the primary data store. If not + * found there it will be returned from the archive data store. The given + * identifier should be the identifier of a previously saved data record. + * Since records are never removed, there should never be cases where the + * identified record is not found. Abnormal cases like that are treated as + * errors and handled by throwing an exception. 
+ * + * @param identifier + * data identifier + * @return identified data record + * @throws DataStoreException + * if the data store could not be accessed, or if the given + * identifier is invalid + */ + public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException { + if (moveDataTaskRunning) { + moveDataTaskLock.lock(); + } + try { + return primaryDataStore.getRecord(identifier); + } catch (DataStoreException e) { + return archiveDataStore.getRecord(identifier); + } finally { + if (moveDataTaskRunning) { + moveDataTaskLock.unlock(); + } + } + } + + /** + * Creates a new data record in the primary data store. The given binary + * stream is consumed and a binary record containing the consumed stream is + * created and returned. If the same stream already exists in another + * record, then that record is returned instead of creating a new one. + *

+ * The given stream is consumed and not closed by this + * method. It is the responsibility of the caller to close the stream. A + * typical call pattern would be: + * + *

+     *     InputStream stream = ...;
+     *     try {
+     *         record = store.addRecord(stream);
+     *     } finally {
+     *         stream.close();
+     *     }
+     * 
+ * + * @param stream + * binary stream + * @return data record that contains the given stream + * @throws DataStoreException + * if the data store could not be accessed + */ + public DataRecord addRecord(InputStream stream) throws DataStoreException { + return primaryDataStore.addRecord(stream); + } + + /** + * From now on, update the modified date of an object even when accessing it + * in the archive data store. Usually, the modified date is only updated + * when creating a new object, or when a new link is added to an existing + * object. When this setting is enabled, even getLength() will update the + * modified date. + * + * @param before + * - update the modified date to the current time if it is older + * than this value + */ + public void updateModifiedDateOnAccess(long before) { + archiveDataStore.updateModifiedDateOnAccess(before); + } + + /** + * Delete objects that have a modified date older than the specified date + * from the archive data store. + * + * @param min + * the minimum time + * @return the number of data records deleted + * @throws DataStoreException + */ + public int deleteAllOlderThan(long min) throws DataStoreException { + return archiveDataStore.deleteAllOlderThan(min); + } + + /** + * Get all identifiers from the archive data store. 
+ * + * @return an iterator over all DataIdentifier objects + * @throws DataStoreException + * if the list could not be read + */ + public Iterator getAllIdentifiers() throws DataStoreException { + return archiveDataStore.getAllIdentifiers(); + } + + public DataRecord getRecordFromReference(String reference) + throws DataStoreException { + DataRecord record = primaryDataStore.getRecordFromReference(reference); + if (record == null) { + record = archiveDataStore.getRecordFromReference(reference); + } + return record; + } + + /** + * {@inheritDoc} + */ + public void init(String homeDir) throws RepositoryException { + if (delayedDelete) { + // First initialize the identifiersToDeleteFile + LocalFileSystem fileSystem = new LocalFileSystem(); + fileSystem.setRoot(new File(homeDir)); + identifiersToDeleteFile = new FileSystemResource(fileSystem, FileSystem.SEPARATOR + + IDENTIFIERS_TO_DELETE_FILE_KEY); + } + moveDataTaskThread = new Thread(new MoveDataTask(), + "Jackrabbit-MulitDataStore-MoveDataTaskThread"); + moveDataTaskThread.setDaemon(true); + moveDataTaskThread.start(); + log.info("MultiDataStore-MoveDataTask thread started; first run scheduled at " + + moveDataTaskNextRun.getTime()); + if (delayedDelete) { + try { + // Run on startup the DeleteDelayedIdentifiersTask only if the + // file exists and modify date is older than the + // delayedDeleteSleep timeout ... + if (identifiersToDeleteFile != null + && identifiersToDeleteFile.exists() + && (identifiersToDeleteFile.lastModified() + (delayedDeleteSleep * 1000)) < System + .currentTimeMillis()) { + deleteDelayedIdentifiersTaskThread = new Thread( + //Start immediately ... + new DeleteDelayedIdentifiersTask(0L), + "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); + deleteDelayedIdentifiersTaskThread.setDaemon(true); + deleteDelayedIdentifiersTaskThread.start(); + log.info("Old entries in the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File found. 
DeleteDelayedIdentifiersTask-Thread started now."); + } + } catch (FileSystemException e) { + throw new RepositoryException("I/O error while reading from '" + + identifiersToDeleteFile.getPath() + "'", e); + } + } + } + + /** + * Get the minimum size of an object that should be stored in the primary + * data store. + * + * @return the minimum size in bytes + */ + public int getMinRecordLength() { + return primaryDataStore.getMinRecordLength(); + } + + /** + * {@inheritDoc} + */ + public void close() throws DataStoreException { + DataStoreException lastException = null; + // 1. close the primary data store + try { + primaryDataStore.close(); + } catch (DataStoreException e) { + lastException = e; + } + // 2. close the archive data store + try { + archiveDataStore.close(); + } catch (DataStoreException e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + // 3. if moveDataTaskThread is running interrupt it + try { + if (moveDataTaskRunning) { + moveDataTaskThread.interrupt(); + } + } catch (Exception e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + // 4. 
if deleteDelayedIdentifiersTaskThread is running interrupt it + try { + if (deleteDelayedIdentifiersTaskThread != null + && deleteDelayedIdentifiersTaskThread.isAlive()) { + deleteDelayedIdentifiersTaskThread.interrupt(); + } + } catch (Exception e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + if (lastException != null) { + throw lastException; + } + } + + /** + * {@inheritDoc} + */ + public void clearInUse() { + archiveDataStore.clearInUse(); + } + + public int getMaxAge() { + return maxAge; + } + + public void setMaxAge(int maxAge) { + this.maxAge = maxAge; + } + + public int getMoveDataTaskSleep() { + return moveDataTaskSleep; + } + + public int getMoveDataTaskFirstRunHourOfDay() { + return moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY); + } + + public void setMoveDataTaskSleep(int sleep) { + this.moveDataTaskSleep = sleep; + } + + public void setMoveDataTaskFirstRunHourOfDay(int hourOfDay) { + moveDataTaskNextRun = Calendar.getInstance(); + if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) { + moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); + } + moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay); + moveDataTaskNextRun.set(Calendar.MINUTE, 0); + moveDataTaskNextRun.set(Calendar.SECOND, 0); + moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); + } + + public void setSleepBetweenRecords(long millis) { + this.sleepBetweenRecords = millis; + } + + public long getSleepBetweenRecords() { + return sleepBetweenRecords; + } + + public boolean isDelayedDelete() { + return delayedDelete; + } + + public void setDelayedDelete(boolean delayedDelete) { + this.delayedDelete = delayedDelete; + } + + public long getDelayedDeleteSleep() { + return delayedDeleteSleep; + } + + public void setDelayedDeleteSleep(long delayedDeleteSleep) { + this.delayedDeleteSleep = delayedDeleteSleep; + } + + /** + * Writes the given DataIdentifier to the delayedDeletedFile. 
+ * + * @param identifier + * @return boolean true if it was successful otherwise false + */ + private boolean writeDelayedDataIdentifier(DataIdentifier identifier) { + BufferedWriter writer = null; + try { + File identifierFile = new File( + ((LocalFileSystem) identifiersToDeleteFile.getFileSystem()).getPath(), + identifiersToDeleteFile.getPath()); + writer = new BufferedWriter(new FileWriter(identifierFile, true)); + writer.write(identifier.toString()); + return true; + } catch (Exception e) { + log.warn("I/O error while saving DataIdentifier (stacktrace on DEBUG log level) to '" + + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); + log.debug("Root cause: ", e); + return false; + } finally { + IOUtils.closeQuietly(writer); + } + } + + /** + * Purges the delayedDeletedFile. + * + * @return boolean true if it was successful otherwise false + */ + private boolean purgeDelayedDeleteFile() { + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new OutputStreamWriter( + identifiersToDeleteFile.getOutputStream())); + writer.write(""); + return true; + } catch (Exception e) { + log.warn("I/O error while purging (stacktrace on DEBUG log level) the " + + IDENTIFIERS_TO_DELETE_FILE_KEY + " file '" + + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); + log.debug("Root cause: ", e); + return false; + } finally { + IOUtils.closeQuietly(writer); + } + } + + /** + * Class for maintaining the MultiDataStore. It will be used to move the + * content of the primary data store to the archive data store. 
+ */ + public class MoveDataTask implements Runnable { + + /** + * {@inheritDoc} + */ + public void run() { + while (!Thread.currentThread().isInterrupted()) { + try { + log.info("Next move-data task run scheduled at " + + moveDataTaskNextRun.getTime()); + long sleepTime = moveDataTaskNextRun.getTimeInMillis() + - System.currentTimeMillis(); + if (sleepTime > 0) { + Thread.sleep(sleepTime); + } + moveDataTaskRunning = true; + moveOutdatedData(); + moveDataTaskRunning = false; + moveDataTaskNextRun.add(Calendar.SECOND, moveDataTaskSleep); + if (delayedDelete) { + if (deleteDelayedIdentifiersTaskThread != null + && deleteDelayedIdentifiersTaskThread.isAlive()) { + log.warn("The DeleteDelayedIdentifiersTask-Thread is already running."); + } else { + deleteDelayedIdentifiersTaskThread = new Thread( + new DeleteDelayedIdentifiersTask(delayedDeleteSleep), + "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); + deleteDelayedIdentifiersTaskThread.setDaemon(true); + deleteDelayedIdentifiersTaskThread.start(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + log.warn("Interrupted: stopping move-data task."); + } + + /** + * Moves outdated data from primary to archive data store + */ + protected void moveOutdatedData() { + try { + long now = System.currentTimeMillis(); + long maxAgeMilli = 1000L * 60 * 60 * 24 * maxAge; + log.debug("Collecting all Identifiers from PrimaryDataStore..."); + Iterator allIdentifiers = primaryDataStore.getAllIdentifiers(); + int moved = 0; + while (allIdentifiers.hasNext()) { + DataIdentifier identifier = allIdentifiers.next(); + DataRecord dataRecord = primaryDataStore.getRecord(identifier); + if ((dataRecord.getLastModified() + maxAgeMilli) < now) { + try { + moveDataTaskLock.lock(); + if (delayedDelete) { + // first write it to the file and then add it to + // the archive data store ... 
+ if (writeDelayedDataIdentifier(identifier)) { + archiveDataStore.addRecord(dataRecord.getStream()); + moved++; + } + } else { + // first add it and then delete it .. not really + // atomic ... + archiveDataStore.addRecord(dataRecord.getStream()); + ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); + moved++; + } + if (moved % 100 == 0) { + log.debug("Moving DataRecord's... ({})", moved); + } + } catch (DataStoreException e) { + log.error("Failed to move DataRecord. DataIdentifier: " + identifier, e); + } finally { + moveDataTaskLock.unlock(); + } + } + // Give other threads time to use the MultiDataStore while + // MoveDataTask is running.. + Thread.sleep(sleepBetweenRecords); + } + if (delayedDelete) { + log.info("Moved " + + moved + + " DataRecords to the archive data store. The DataRecords in the primary data store will be removed in " + + delayedDeleteSleep + " seconds."); + } else { + log.info("Moved " + moved + " DataRecords to the archive data store."); + } + } catch (Exception e) { + log.warn("Failed to run move-data task.", e); + } + } + } + + /** + * Class to clean up the delayed DataRecords from the primary data store. + */ + public class DeleteDelayedIdentifiersTask implements Runnable { + + boolean run = true; + private long sleepTime = 0L; + + /** + * Constructor + * @param sleep how long this DeleteDelayedIdentifiersTask should sleep in seconds. 
+ */ + public DeleteDelayedIdentifiersTask(long sleep) { + this.sleepTime = (sleep * 1000L); + } + + @Override + public void run() { + if (moveDataTaskRunning) { + log.warn("It's not supported to run the DeleteDelayedIdentifiersTask while the MoveDataTask is running."); + return; + } + while (run && !Thread.currentThread().isInterrupted()) { + if (sleepTime > 0) { + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + log.info("Start to delete DataRecords from the primary data store."); + BufferedReader reader = null; + ArrayList problemIdentifiers = new ArrayList(); + try { + int deleted = 0; + reader = new BufferedReader(new InputStreamReader( + identifiersToDeleteFile.getInputStream())); + while (true) { + String s = reader.readLine(); + if (s == null || s.equals("")) { + break; + } + DataIdentifier identifier = new DataIdentifier(s); + try { + moveDataTaskLock.lock(); + ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); + deleted++; + } catch (DataStoreException e) { + log.error("Failed to delete DataRecord. DataIdentifier: " + identifier, + e); + problemIdentifiers.add(identifier); + } finally { + moveDataTaskLock.unlock(); + } + // Give other threads time to use the MultiDataStore + // while + // DeleteDelayedIdentifiersTask is running.. 
+ Thread.sleep(sleepBetweenRecords); + } + log.info("Deleted " + deleted + " DataRecords from the primary data store."); + if (problemIdentifiers.isEmpty()) { + try { + identifiersToDeleteFile.delete(); + } catch (FileSystemException e) { + log.warn("Unable to delete the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File."); + if (!purgeDelayedDeleteFile()) { + log.error("Unable to purge the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File."); + } + } + } else { + if (purgeDelayedDeleteFile()) { + for (int x = 0; x < problemIdentifiers.size(); x++) { + writeDelayedDataIdentifier(problemIdentifiers.get(x)); + } + } + } + } catch (InterruptedException e) { + log.warn("Interrupted: stopping delayed-delete task."); + Thread.currentThread().interrupt(); + } catch (Exception e) { + log.warn("Failed to run delayed-delete task.", e); + } finally { + IOUtils.closeQuietly(reader); + run = false; + } + } + } + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStoreAware.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStoreAware.java new file mode 100644 index 00000000000..0b3c4a8be99 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/MultiDataStoreAware.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import org.apache.jackrabbit.oak.spi.blob.data.MultiDataStore.MoveDataTask; + +/** + * To use a DataStore within a MultiDataStore it must implement this + * MultiDataStoreAware Interface. It extends a DataStore to delete a single + * DataRecord. + */ +public interface MultiDataStoreAware { + + /** + * Deletes a single DataRecord based on the given identifier. Delete will + * only be used by the {@link MoveDataTask}. + * + * @param identifier + * data identifier + * @throws DataStoreException + * if the data store could not be accessed, or if the given + * identifier is invalid + */ + void deleteRecord(DataIdentifier identifier) throws DataStoreException; + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/package-info.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/package-info.java new file mode 100755 index 00000000000..6c2276baa7c --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* see JCR-4060 */ +@org.osgi.annotation.versioning.Version("2.15.0") +package org.apache.jackrabbit.oak.spi.blob.data; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/NamedThreadFactory.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/NamedThreadFactory.java new file mode 100644 index 00000000000..606b035bc83 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/NamedThreadFactory.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data.util; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * This class extends {@link ThreadFactory} to creates named threads. 
+ */ +public class NamedThreadFactory implements ThreadFactory { + + private AtomicInteger threadCount = new AtomicInteger(1); + + String threadPrefixName; + + public NamedThreadFactory(String threadPrefixName) { + super(); + this.threadPrefixName = threadPrefixName; + } + + public Thread newThread(Runnable r) { + Thread thread = new Thread(r); + thread.setContextClassLoader(getClass().getClassLoader()); + thread.setName(threadPrefixName + "-" + threadCount.getAndIncrement()); + return thread; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/package-info.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/package-info.java new file mode 100755 index 00000000000..4e2c1e84f93 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/data/util/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* see JCR-4060 */ +@org.osgi.annotation.versioning.Version("2.13.5") +package org.apache.jackrabbit.oak.spi.blob.data.util; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystem.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystem.java new file mode 100644 index 00000000000..2dfbb85da7f --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystem.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.fs; + +import java.io.InputStream; +import java.io.OutputStream; + +/** + * The FileSystem interface is an abstraction of a virtual + * file system. The similarities of its method names with with the methods + * of the java.io.File class are intentional. + *
+ * Implementations of this interface expose a file system-like resource. + * File system-like resources include WebDAV-enabled servers, local file systems, + * and so forth. + */ +public interface FileSystem { + + /** + * File separator + */ + String SEPARATOR = "/"; + + /** + * File separator character + */ + char SEPARATOR_CHAR = '/'; + + /** + * Initialize the file system + * + * @throws FileSystemException if the file system initialization fails + */ + void init() throws FileSystemException; + + /** + * Close the file system. After calling this method, the file system is no + * longer accessible. + * + * @throws FileSystemException + */ + void close() throws FileSystemException; + + /** + * Returns an input stream of the contents of the file denoted by this path. + * + * @param filePath the path of the file. + * @return an input stream of the contents of the file. + * @throws FileSystemException if the file does not exist + * or if it cannot be read from + */ + InputStream getInputStream(String filePath) throws FileSystemException; + + /** + * Returns an output stream for writing bytes to the file denoted by this path. + * The file will be created if it doesn't exist. If the file exists, its contents + * will be overwritten. + * + * @param filePath the path of the file. + * @return an output stream for writing bytes to the file. + * @throws FileSystemException if the file cannot be written to or created + */ + OutputStream getOutputStream(String filePath) throws FileSystemException; + + /** + * Creates the folder named by this path, including any necessary but + * nonexistent parent folders. Note that if this operation fails it + * may have succeeded in creating some of the necessary parent folders. + * + * @param folderPath the path of the folder to be created. + * @throws FileSystemException if a file system entry denoted by path + * already exists or if another error occurs. 
+ */ + void createFolder(String folderPath) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path exists; false otherwise. + * @throws FileSystemException + */ + boolean exists(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists and + * is a file. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path is a file; false otherwise. + * @throws FileSystemException + */ + boolean isFile(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists and + * is a folder. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path is a folder; false otherwise. + * @throws FileSystemException + */ + boolean isFolder(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path has child entries. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path has child entries; false otherwise. + * @throws FileSystemException + */ + boolean hasChildren(String path) throws FileSystemException; + + /** + * Returns the length of the file denoted by this path. + * + * @param filePath the path of the file. + * @return The length, in bytes, of the file denoted by this path, + * or -1L if the length can't be determined. + * @throws FileSystemException if the path does not denote an existing file. + */ + long length(String filePath) throws FileSystemException; + + /** + * Returns the time that the file system entry denoted by this path + * was last modified. + * + * @param path the path of a file system entry. 
+ * @return A long value representing the time the file system entry was + * last modified, measured in milliseconds since the epoch + * (00:00:00 GMT, January 1, 1970), or 0L if the modification + * time can't be determined. + * @throws FileSystemException if the file system entry does not exist. + */ + long lastModified(String path) throws FileSystemException; + + /** + * Returns an array of strings naming the files and folders + * in the folder denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the files and folders + * in the folder denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] list(String folderPath) throws FileSystemException; + + /** + * Returns an array of strings naming the files in the folder + * denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the files in the folder + * denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] listFiles(String folderPath) throws FileSystemException; + + /** + * Returns an array of strings naming the folders in the folder + * denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the folders in the folder + * denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] listFolders(String folderPath) throws FileSystemException; + + /** + * Deletes the file denoted by this path. + * + * @param filePath the path of the file to be deleted. + * @throws FileSystemException if this path does not denote a file or if + * another error occurs. 
+ */ + void deleteFile(String filePath) throws FileSystemException; + + /** + * Deletes the folder denoted by this path. Any contents of this folder + * (folders and files) will be deleted recursively. + * + * @param folderPath the path of the folder to be deleted. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + void deleteFolder(String folderPath) throws FileSystemException; + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemException.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemException.java new file mode 100644 index 00000000000..777928a38db --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemException.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.fs; + +/** + * The FileSystemException signals an error within a file system + * operation. FileSystemExceptions are thrown by {@link FileSystem} + * implementations. + */ +public class FileSystemException extends Exception { + + /** + * Constructs a new instance of this class with the specified detail + * message. 
+ * + * @param message the detail message. The detail message is saved for + * later retrieval by the {@link #getMessage()} method. + */ + public FileSystemException(String message) { + super(message); + } + + /** + * Constructs a new instance of this class with the specified detail + * message and root cause. + * + * @param message the detail message. The detail message is saved for + * later retrieval by the {@link #getMessage()} method. + * @param rootCause root failure cause + */ + public FileSystemException(String message, Throwable rootCause) { + super(message, rootCause); + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemPathUtil.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemPathUtil.java new file mode 100644 index 00000000000..c73821aa204 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemPathUtil.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.fs; + +import java.io.ByteArrayOutputStream; +import java.util.BitSet; + + +/** + * Utility class for handling paths in a file system. 
+ */ +public final class FileSystemPathUtil { + + /** + * Array of lowercase hexadecimal characters used in creating hex escapes. + */ + private static final char[] HEX_TABLE = "0123456789abcdef".toCharArray(); + + /** + * The escape character used to mark hex escape sequences. + */ + private static final char ESCAPE_CHAR = '%'; + + /** + * The list of characters that are not encoded by the escapeName(String) + * and unescape(String) methods. They contains the characters + * which can safely be used in file names: + */ + public static final BitSet SAFE_NAMECHARS; + + /** + * The list of characters that are not encoded by the escapePath(String) + * and unescape(String) methods. They contains the characters + * which can safely be used in file paths: + */ + public static final BitSet SAFE_PATHCHARS; + + static { + // build list of valid name characters + SAFE_NAMECHARS = new BitSet(256); + int i; + for (i = 'a'; i <= 'z'; i++) { + SAFE_NAMECHARS.set(i); + } + for (i = 'A'; i <= 'Z'; i++) { + SAFE_NAMECHARS.set(i); + } + for (i = '0'; i <= '9'; i++) { + SAFE_NAMECHARS.set(i); + } + SAFE_NAMECHARS.set('-'); + SAFE_NAMECHARS.set('_'); + SAFE_NAMECHARS.set('.'); + + // build list of valid path characters (includes name characters) + SAFE_PATHCHARS = (BitSet) SAFE_NAMECHARS.clone(); + SAFE_PATHCHARS.set(FileSystem.SEPARATOR_CHAR); + } + + /** + * private constructor + */ + private FileSystemPathUtil() { + } + + /** + * Escapes the given string using URL encoding for all bytes not included + * in the given set of safe characters. 
+ * + * @param s the string to escape + * @param safeChars set of safe characters (bytes) + * @return escaped string + */ + private static String escape(String s, BitSet safeChars) { + byte[] bytes = s.getBytes(); + StringBuilder out = new StringBuilder(bytes.length); + for (int i = 0; i < bytes.length; i++) { + int c = bytes[i] & 0xff; + if (safeChars.get(c) && c != ESCAPE_CHAR) { + out.append((char) c); + } else { + out.append(ESCAPE_CHAR); + out.append(HEX_TABLE[(c >> 4) & 0x0f]); + out.append(HEX_TABLE[(c) & 0x0f]); + } + } + return out.toString(); + } + + /** + * Encodes the specified path. Same as + * {@link #escapeName(String)} except that the separator + * character / is regarded as a legal path character + * that needs no escaping. + * + * @param path the path to encode. + * @return the escaped path + */ + public static String escapePath(String path) { + return escape(path, SAFE_PATHCHARS); + } + + /** + * Encodes the specified name. Same as + * {@link #escapePath(String)} except that the separator character + * / is regarded as an illegal character that needs + * escaping. + * + * @param name the name to encode. + * @return the escaped name + */ + public static String escapeName(String name) { + return escape(name, SAFE_NAMECHARS); + } + + /** + * Decodes the specified path/name. 
+ * + * @param pathOrName the escaped path/name + * @return the unescaped path/name + */ + public static String unescape(String pathOrName) { + ByteArrayOutputStream out = new ByteArrayOutputStream(pathOrName.length()); + for (int i = 0; i < pathOrName.length(); i++) { + char c = pathOrName.charAt(i); + if (c == ESCAPE_CHAR) { + try { + out.write(Integer.parseInt(pathOrName.substring(i + 1, i + 3), 16)); + } catch (NumberFormatException e) { + IllegalArgumentException iae = new IllegalArgumentException("Failed to unescape escape sequence"); + iae.initCause(e); + throw iae; + } + i += 2; + } else { + out.write(c); + } + } + return new String(out.toByteArray()); + } + + /** + * Tests whether the specified path represents the root path, i.e. "/". + * + * @param path path to test + * @return true if the specified path represents the root path; false otherwise. + */ + public static boolean denotesRoot(String path) { + return path.equals(FileSystem.SEPARATOR); + } + + /** + * Checks if path is a valid path. + * + * @param path the path to be checked + * @throws FileSystemException If path is not a valid path + */ + public static void checkFormat(String path) throws FileSystemException { + if (path == null) { + throw new FileSystemException("null path"); + } + + // path must be absolute, i.e. 
starting with '/' + if (!path.startsWith(FileSystem.SEPARATOR)) { + throw new FileSystemException("not an absolute path: " + path); + } + + // trailing '/' is not allowed (except for root path) + if (path.endsWith(FileSystem.SEPARATOR) && path.length() > 1) { + throw new FileSystemException("malformed path: " + path); + } + + String[] names = path.split(FileSystem.SEPARATOR); + for (int i = 1; i < names.length; i++) { + // name must not be empty + if (names[i].length() == 0) { + throw new FileSystemException("empty name: " + path); + } + // leading/trailing whitespace is not allowed + String trimmed = names[i].trim(); + if (!trimmed.equals(names[i])) { + throw new FileSystemException("illegal leading or trailing whitespace in name: " + path); + } + } + } + + /** + * Returns the parent directory of the specified path. + * + * @param path a file system path denoting a directory or a file. + * @return the parent directory. + */ + public static String getParentDir(String path) { + int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); + if (pos > 0) { + return path.substring(0, pos); + } + return FileSystem.SEPARATOR; + } + + /** + * Returns the name of the specified path. + * + * @param path a file system path denoting a directory or a file. + * @return the name. + */ + public static String getName(String path) { + int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); + if (pos != -1) { + return path.substring(pos + 1); + } + return path; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemResource.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemResource.java new file mode 100644 index 00000000000..535ba4829fc --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/FileSystemResource.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.io.IOUtils; + +/** + * A FileSystemResource represents a resource (i.e. file) in a + * FileSystem. + */ +public class FileSystemResource { + + protected final FileSystem fs; + + protected final String path; + + static { + // preload FileSystemPathUtil to prevent classloader issues during shutdown + FileSystemPathUtil.class.hashCode(); + } + + /** + * Creates a new FileSystemResource + * + * @param fs the FileSystem where the resource is located + * @param path the path of the resource in the FileSystem + */ + public FileSystemResource(FileSystem fs, String path) { + if (fs == null) { + throw new IllegalArgumentException("invalid file system argument"); + } + this.fs = fs; + + if (path == null) { + throw new IllegalArgumentException("invalid path argument"); + } + this.path = path; + } + + /** + * Returns the FileSystem where this resource is located. + * + * @return the FileSystem where this resource is located. + */ + public FileSystem getFileSystem() { + return fs; + } + + /** + * Returns the path of this resource. + * + * @return the path of this resource. 
+ */ + public String getPath() { + return path; + } + + /** + * Returns the parent directory of this resource. + * + * @return the parent directory. + */ + public String getParentDir() { + return FileSystemPathUtil.getParentDir(path); + } + + /** + * Returns the name of this resource. + * + * @return the name. + */ + public String getName() { + return FileSystemPathUtil.getName(path); + } + + /** + * Creates the parent directory of this resource, including any necessary + * but nonexistent parent directories. + * + * @throws FileSystemException + */ + public synchronized void makeParentDirs() throws FileSystemException { + String parentDir = getParentDir(); + if (!fs.exists(parentDir)) { + fs.createFolder(parentDir); + } + } + + /** + * Deletes this resource. + * Same as {@link #delete(boolean)} called with {@code false}. + * + * @see FileSystem#deleteFile + */ + public void delete() throws FileSystemException { + delete(false); + } + + /** + * Deletes this resource. + * + * @param pruneEmptyParentDirs if true, empty parent folders will + * automatically be deleted + * @see FileSystem#deleteFile + */ + public synchronized void delete(boolean pruneEmptyParentDirs) throws FileSystemException { + fs.deleteFile(path); + if (pruneEmptyParentDirs) { + // prune empty parent folders + String parentDir = FileSystemPathUtil.getParentDir(path); + while (!parentDir.equals(FileSystem.SEPARATOR) + && fs.exists(parentDir) + && !fs.hasChildren(parentDir)) { + fs.deleteFolder(parentDir); + parentDir = FileSystemPathUtil.getParentDir(parentDir); + } + } + } + + /** + * @see FileSystem#exists + */ + public boolean exists() throws FileSystemException { + return fs.exists(path); + } + + /** + * @see FileSystem#getInputStream + */ + public InputStream getInputStream() throws FileSystemException { + return fs.getInputStream(path); + } + + /** + * Spools this resource to the given output stream. 
+ * + * @param out output stream where to spool the resource + * @throws FileSystemException if the input stream for this resource could + * not be obtained + * @throws IOException if an error occurs while while spooling + * @see FileSystem#getInputStream + */ + public void spool(OutputStream out) throws FileSystemException, IOException { + InputStream in = fs.getInputStream(path); + try { + IOUtils.copy(in, out); + } finally { + IOUtils.closeQuietly(in); + } + } + + /** + * @see FileSystem#getOutputStream + */ + public OutputStream getOutputStream() throws FileSystemException { + return fs.getOutputStream(path); + } + + /** + * @see FileSystem#lastModified + */ + public long lastModified() throws FileSystemException { + return fs.lastModified(path); + } + + /** + * @see FileSystem#length + */ + public long length() throws FileSystemException { + return fs.length(path); + } + + //-------------------------------------------< java.lang.Object overrides > + /** + * Returns the path string of this resource. This is just the + * string returned by the {@link #getPath} method. + * + * @return The path string of this resource + */ + public String toString() { + return getPath(); + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof FileSystemResource) { + FileSystemResource other = (FileSystemResource) obj; + return (path == null ? other.path == null : path.equals(other.path)) + && (fs == null ? other.fs == null : fs.equals(other.fs)); + } + return false; + } + + /** + * Returns zero to satisfy the Object equals/hashCode contract. + * This class is mutable and not meant to be used as a hash key. 
+ * + * @return always zero + * @see Object#hashCode() + */ + public int hashCode() { + return 0; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/FileUtil.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/FileUtil.java new file mode 100644 index 00000000000..d72003469cd --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/FileUtil.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.fs.local; + +import java.io.File; +import java.io.IOException; + +import org.apache.commons.io.FileUtils; + +/** + * Static utility methods for recursively copying and deleting files and + * directories. + */ +public final class FileUtil { + + /** + * private constructor + */ + private FileUtil() { + } + + /** + * Recursively copies the given file or directory to the + * given destination. 
+ * + * @param src source file or directory + * @param dest destination file or directory + * @throws IOException if the file or directory cannot be copied + */ + public static void copy(File src, File dest) throws IOException { + if (!src.canRead()) { + throw new IOException(src.getPath() + " can't be read from."); + } + if (src.isDirectory()) { + // src is a folder + if (dest.isFile()) { + throw new IOException("can't copy a folder to a file"); + } + if (!dest.exists()) { + dest.mkdirs(); + } + if (!dest.canWrite()) { + throw new IOException("can't write to " + dest.getPath()); + } + File[] children = src.listFiles(); + for (int i = 0; i < children.length; i++) { + copy(children[i], new File(dest, children[i].getName())); + } + } else { + // src is a file + File destParent; + if (dest.isDirectory()) { + // dest is a folder + destParent = dest; + dest = new File(destParent, src.getName()); + } else { + destParent = dest.getParentFile(); + } + if (!destParent.canWrite()) { + throw new IOException("can't write to " + destParent.getPath()); + } + + FileUtils.copyFile(src, dest); + } + } + + /** + * Recursively deletes the given file or directory. 
+ * + * @param f file or directory + * @throws IOException if the file or directory cannot be deleted + */ + public static void delete(File f) throws IOException { + if (f.isDirectory()) { + // it's a folder, list children first + File[] children = f.listFiles(); + for (int i = 0; i < children.length; i++) { + delete(children[i]); + } + } + if (!f.delete()) { + throw new IOException("Unable to delete '" + f.getPath() + "'"); + } + } +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/HandleMonitor.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/HandleMonitor.java new file mode 100644 index 00000000000..2712f91d019 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/HandleMonitor.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.fs.local; + +import org.apache.jackrabbit.util.LazyFileInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.HashSet; + +/** + * This Class implements a very simple open handle monitor for the local + * file system. This is usefull, if the list of open handles, referenced by + * an open FileInputStream() should be tracked. This can cause problems on + * windows filesystems where open files cannot be deleted. + */ +public class HandleMonitor { + + /** + * The default logger + */ + private static Logger log = LoggerFactory.getLogger(HandleMonitor.class); + + /** + * the map of open handles (key=File, value=Handle) + */ + private HashMap openHandles = new HashMap(); + + /** + * Opens a file and returns an InputStream + * + * @param file + * @return + * @throws FileNotFoundException + */ + public InputStream open(File file) throws FileNotFoundException { + Handle handle = getHandle(file); + InputStream in = handle.open(); + return in; + } + + /** + * Checks, if the file is open + * @param file + * @return + */ + public boolean isOpen(File file) { + return openHandles.containsKey(file); + } + + /** + * Closes a file + * @param file + */ + private void close(File file) { + openHandles.remove(file); + } + + /** + * Returns the handle for a file. 
+ * @param file + * @return + */ + private Handle getHandle(File file) { + Handle handle = openHandles.get(file); + if (handle == null) { + handle = new Handle(file); + openHandles.put(file, handle); + } + return handle; + } + + /** + * Dumps the contents of this monitor + */ + public void dump() { + log.info("Number of open files: " + openHandles.size()); + for (File file : openHandles.keySet()) { + Handle handle = openHandles.get(file); + handle.dump(); + } + } + + /** + * Dumps the information for a file + * @param file + */ + public void dump(File file) { + Handle handle = openHandles.get(file); + if (handle != null) { + handle.dump(true); + } + } + + /** + * Class representing all open handles to a file + */ + private class Handle { + + /** + * the file of this handle + */ + private File file; + + /** + * all open streams of this handle + */ + private HashSet streams = new HashSet(); + + /** + * Creates a new handle for a file + * @param file + */ + private Handle(File file) { + this.file = file; + } + + /** + * opens a stream for this handle + * @return + * @throws FileNotFoundException + */ + private InputStream open() throws FileNotFoundException { + Handle.MonitoredInputStream in = new Handle.MonitoredInputStream(file); + streams.add(in); + return in; + } + + /** + * Closes a stream + * @param in + */ + private void close(MonitoredInputStream in) { + streams.remove(in); + if (streams.isEmpty()) { + HandleMonitor.this.close(file); + } + } + + /** + * Dumps this handle + */ + private void dump() { + dump(false); + } + + /** + * Dumps this handle + */ + private void dump(boolean detailed) { + if (detailed) { + log.info("- " + file.getPath() + ", " + streams.size()); + for (Handle.MonitoredInputStream in : streams) { + in.dump(); + } + } else { + log.info("- " + file.getPath() + ", " + streams.size()); + } + } + + /** + * Delegating input stream that registers/unregisters itself from the + * handle. 
+ */ + private class MonitoredInputStream extends LazyFileInputStream { + + /** + * throwable of the time, the stream was created + */ + private final Throwable throwable = new Exception(); + + /** + * {@inheritDoc} + */ + private MonitoredInputStream(File file) throws FileNotFoundException { + super(file); + } + + /** + * dumps this stream + */ + private void dump() { + log.info("- opened by : ", throwable); + } + + /** + * {@inheritDoc} + */ + public void close() throws IOException { + // remove myself from the set + Handle.this.close(this); + super.close(); + } + + } + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/LocalFileSystem.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/LocalFileSystem.java new file mode 100644 index 00000000000..ab4bfe75068 --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/LocalFileSystem.java @@ -0,0 +1,386 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.fs.local; + +import org.apache.jackrabbit.oak.spi.blob.fs.FileSystem; +import org.apache.jackrabbit.oak.spi.blob.fs.FileSystemException; +import org.apache.jackrabbit.util.LazyFileInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileFilter; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * A LocalFileSystem ... + */ +public class LocalFileSystem implements FileSystem { + + private static Logger log = LoggerFactory.getLogger(LocalFileSystem.class); + + private File root; + + private HandleMonitor monitor; + + /** + * Default constructor + */ + public LocalFileSystem() { + } + + public String getPath() { + if (root != null) { + return root.getPath(); + } else { + return null; + } + } + + /** + * Sets the path to the root directory of this local filesystem. please note + * that this method can be called via reflection during initialization and + * must not be altered. + * + * @param rootPath the path to the root directory + */ + public void setPath(String rootPath) { + setRoot(new File(osPath(rootPath))); + } + + public void setRoot(File root) { + this.root = root; + } + + /** + * Enables/Disables the use of the handle monitor. + * + * @param enable + */ + public void setEnableHandleMonitor(String enable) { + setEnableHandleMonitor(Boolean.valueOf(enable).booleanValue()); + } + + /** + * Enables/Disables the use of the handle monitor. + * + * @param enable flag + */ + public void setEnableHandleMonitor(boolean enable) { + if (enable && monitor == null) { + monitor = new HandleMonitor(); + } + if (!enable && monitor != null) { + monitor = null; + } + } + + /** + * Returns true if use of the handle monitor is currently + * enabled, otherwise returns false. 
+ * + * @see #setEnableHandleMonitor(boolean) + */ + public String getEnableHandleMonitor() { + return monitor == null ? "false" : "true"; + } + + private String osPath(String genericPath) { + if (File.separator.equals(SEPARATOR)) { + return genericPath; + } + return genericPath.replace(SEPARATOR_CHAR, File.separatorChar); + } + + //-------------------------------------------< java.lang.Object overrides > + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof LocalFileSystem) { + LocalFileSystem other = (LocalFileSystem) obj; + if (root == null) { + return other.root == null; + } else { + return root.equals(other.root); + } + } + return false; + } + + /** + * Returns zero to satisfy the Object equals/hashCode contract. + * This class is mutable and not meant to be used as a hash key. + * + * @return always zero + * @see Object#hashCode() + */ + public int hashCode() { + return 0; + } + + //-----------------------------------------------------------< FileSystem > + /** + * {@inheritDoc} + */ + public void init() throws FileSystemException { + if (root == null) { + String msg = "root directory not set"; + log.debug(msg); + throw new FileSystemException(msg); + } + + if (root.exists()) { + if (!root.isDirectory()) { + String msg = "path does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + } else { + if (!root.mkdirs()) { + String msg = "failed to create root"; + log.debug(msg); + throw new FileSystemException(msg); + } + } + log.info("LocalFileSystem initialized at path " + root.getPath()); + if (monitor != null) { + log.info("LocalFileSystem using handle monitor"); + } + } + + /** + * {@inheritDoc} + */ + public void close() throws FileSystemException { + root = null; + } + + /** + * {@inheritDoc} + */ + public void createFolder(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + if (f.exists()) { + String msg = f.getPath() + " already exists"; 
+ log.debug(msg); + throw new FileSystemException(msg); + } + if (!f.mkdirs()) { + String msg = "failed to create folder " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg); + } + } + + /** + * {@inheritDoc} + */ + public void deleteFile(String filePath) throws FileSystemException { + File f = new File(root, osPath(filePath)); + if (!f.isFile()) { + String msg = f.getPath() + " does not denote an existing file"; + throw new FileSystemException(msg); + } + try { + FileUtil.delete(f); + } catch (IOException ioe) { + String msg = "failed to delete " + f.getPath(); + if (monitor != null && monitor.isOpen(f)) { + log.error("Unable to delete. There are still open streams."); + monitor.dump(f); + } + + throw new FileSystemException(msg, ioe); + } + } + + /** + * {@inheritDoc} + */ + public void deleteFolder(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + if (!f.isDirectory()) { + String msg = f.getPath() + " does not denote an existing folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + try { + FileUtil.delete(f); + } catch (IOException ioe) { + String msg = "failed to delete " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg, ioe); + } + } + + /** + * {@inheritDoc} + */ + public boolean exists(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.exists(); + } + + /** + * {@inheritDoc} + */ + public InputStream getInputStream(String filePath) + throws FileSystemException { + File f = new File(root, osPath(filePath)); + try { + if (monitor == null) { + return new LazyFileInputStream(f); + } else { + return monitor.open(f); + } + } catch (FileNotFoundException fnfe) { + String msg = f.getPath() + " does not denote an existing file"; + log.debug(msg); + throw new FileSystemException(msg, fnfe); + } + } + + /** + * {@inheritDoc} + */ + public OutputStream getOutputStream(String filePath) + throws FileSystemException { + File f = 
new File(root, osPath(filePath)); + try { + return new FileOutputStream(f); + } catch (FileNotFoundException fnfe) { + String msg = "failed to get output stream for " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg, fnfe); + } + } + + /** + * {@inheritDoc} + */ + public boolean hasChildren(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + if (!f.exists()) { + String msg = f.getPath() + " does not exist"; + log.debug(msg); + throw new FileSystemException(msg); + } + if (f.isFile()) { + return false; + } + return (f.list().length > 0); + } + + /** + * {@inheritDoc} + */ + public boolean isFile(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.isFile(); + } + + /** + * {@inheritDoc} + */ + public boolean isFolder(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.isDirectory(); + } + + /** + * {@inheritDoc} + */ + public long lastModified(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.lastModified(); + } + + /** + * {@inheritDoc} + */ + public long length(String filePath) throws FileSystemException { + File f = new File(root, osPath(filePath)); + if (!f.exists()) { + return -1; + } + return f.length(); + } + + /** + * {@inheritDoc} + */ + public String[] list(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + String[] entries = f.list(); + if (entries == null) { + String msg = folderPath + " does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + return entries; + } + + /** + * {@inheritDoc} + */ + public String[] listFiles(String folderPath) throws FileSystemException { + File folder = new File(root, osPath(folderPath)); + File[] files = folder.listFiles(new FileFilter() { + public boolean accept(File f) { + return f.isFile(); + } + }); + if (files == null) { + String msg = folderPath + " does not 
denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + String[] entries = new String[files.length]; + for (int i = 0; i < files.length; i++) { + entries[i] = files[i].getName(); + } + return entries; + } + + /** + * {@inheritDoc} + */ + public String[] listFolders(String folderPath) throws FileSystemException { + File file = new File(root, osPath(folderPath)); + File[] folders = file.listFiles(new FileFilter() { + public boolean accept(File f) { + return f.isDirectory(); + } + }); + if (folders == null) { + String msg = folderPath + " does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + String[] entries = new String[folders.length]; + for (int i = 0; i < folders.length; i++) { + entries[i] = folders[i].getName(); + } + return entries; + } + +} diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/package-info.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/package-info.java new file mode 100755 index 00000000000..5d8b3fa170b --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/local/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* see JCR-4060 */ +@org.osgi.annotation.versioning.Version("2.13.5") +package org.apache.jackrabbit.oak.spi.blob.fs.local; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/package-info.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/package-info.java new file mode 100755 index 00000000000..3dc6dccec2e --- /dev/null +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/fs/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* see JCR-4060 */ +@org.osgi.annotation.versioning.Version("2.13.5") +package org.apache.jackrabbit.oak.spi.blob.fs; diff --git a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/stats/BlobStatsCollector.java b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/stats/BlobStatsCollector.java index 4bf22cf5354..86d36bad9b1 100644 --- a/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/stats/BlobStatsCollector.java +++ b/oak-blob/src/main/java/org/apache/jackrabbit/oak/spi/blob/stats/BlobStatsCollector.java @@ -22,8 +22,9 @@ import java.io.InputStream; import java.util.concurrent.TimeUnit; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.osgi.annotation.versioning.ConsumerType; /** @@ -188,7 +189,7 @@ public void getAllIdentifiersFailed() { } /** * Called when deleting binaries older than a specified date, via - * {@link org.apache.jackrabbit.core.data.DataStore#deleteAllOlderThan(long)}. + * {@link DataStore#deleteAllOlderThan(long)}. * * @param timeTaken time taken to perform the deletion * @param unit unit of time taken @@ -197,21 +198,21 @@ public void getAllIdentifiersFailed() { } void deletedAllOlderThan(long timeTaken, TimeUnit unit, long min); /** - * Called when {@link org.apache.jackrabbit.core.data.DataStore#deleteAllOlderThan(long)} is completed. + * Called when {@link DataStore#deleteAllOlderThan(long)} is completed. * * @param deletedCount count of records deleted */ void deleteAllOlderThanCompleted(int deletedCount); /** - * Called when {@link org.apache.jackrabbit.core.data.DataStore#deleteAllOlderThan(long)} fails. + * Called when {@link DataStore#deleteAllOlderThan(long)} fails. 
* * @param min time used for determining what to delete */ void deleteAllOlderThanFailed(long min); /** - * Called when a binary is added via {@link org.apache.jackrabbit.core.data.DataStore#addRecord(InputStream)}. + * Called when a binary is added via {@link DataStore#addRecord(InputStream)}. * * @param timeTaken time taken to perform the operation * @param unit unit of time taken @@ -220,20 +221,20 @@ public void getAllIdentifiersFailed() { } void recordAdded(long timeTaken, TimeUnit unit, long size); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#addRecord(InputStream)} is completed. + * Called when a call to {@link DataStore#addRecord(InputStream)} is completed. * * @param blobId id of the record which was added */ void addRecordCompleted(String blobId); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#addRecord(InputStream)} fails. + * Called when a call to {@link DataStore#addRecord(InputStream)} fails. */ void addRecordFailed(); /** - * Called when a {@link org.apache.jackrabbit.core.data.DataRecord} is retrieved via - * {@link org.apache.jackrabbit.core.data.DataStore#getRecord(DataIdentifier)}. + * Called when a {@link DataRecord} is retrieved via + * {@link DataStore#getRecord(DataIdentifier)}. * * @param timeTaken time taken to perform the operation * @param unit unit of time taken @@ -242,22 +243,22 @@ public void getAllIdentifiersFailed() { } void getRecordCalled(long timeTaken, TimeUnit unit, long size); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecord(DataIdentifier)} is completed. + * Called when a call to {@link DataStore#getRecord(DataIdentifier)} is completed. * * @param blobId id of the record retrieved */ void getRecordCompleted(String blobId); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecord(DataIdentifier)} fails. + * Called when a call to {@link DataStore#getRecord(DataIdentifier)} fails. 
* * @param blobId id of the record */ void getRecordFailed(String blobId); /** - * Called when a {@link org.apache.jackrabbit.core.data.DataRecord} is retrieved via - * {@link org.apache.jackrabbit.core.data.DataStore#getRecordIfStored(DataIdentifier)}. + * Called when a {@link DataRecord} is retrieved via + * {@link DataStore#getRecordIfStored(DataIdentifier)}. * * @param timeTaken time taken to perform the operation * @param unit unit of time taken @@ -266,22 +267,22 @@ public void getAllIdentifiersFailed() { } void getRecordIfStoredCalled(long timeTaken, TimeUnit unit, long size); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecordIfStored(DataIdentifier)} is completed. + * Called when a call to {@link DataStore#getRecordIfStored(DataIdentifier)} is completed. * * @param blobId id of the record retrieved */ void getRecordIfStoredCompleted(String blobId); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecordIfStored(DataIdentifier)} fails. + * Called when a call to {@link DataStore#getRecordIfStored(DataIdentifier)} fails. * * @param blobId id of the record */ void getRecordIfStoredFailed(String blobId); /** - * Called when a {@link org.apache.jackrabbit.core.data.DataRecord} is retrieved via - * {@link org.apache.jackrabbit.core.data.DataStore#getRecordFromReference(String)}. + * Called when a {@link DataRecord} is retrieved via + * {@link DataStore#getRecordFromReference(String)}. * * @param timeTaken time taken to perform the operation * @param unit unit of time taken @@ -290,14 +291,14 @@ public void getAllIdentifiersFailed() { } void getRecordFromReferenceCalled(long timeTaken, TimeUnit unit, long size); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecordFromReference(String)} is completed. + * Called when a call to {@link DataStore#getRecordFromReference(String)} is completed. 
* * @param reference reference of the record retrieved */ void getRecordFromReferenceCompleted(String reference); /** - * Called when a call to {@link org.apache.jackrabbit.core.data.DataStore#getRecordFromReference(String)} fails. + * Called when a call to {@link DataStore#getRecordFromReference(String)} fails. * * @param reference reference of the record */ diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryBackend.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryBackend.java new file mode 100644 index 00000000000..f8ba06f1e99 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryBackend.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +/** + * An in-memory backend implementation used to speed up testing. 
+ */ +public class InMemoryBackend implements Backend { + + private HashMap data = new HashMap(); + + private HashMap timeMap = new HashMap(); + + private CachingDataStore store; + + private Properties properties; + + @Override + public void init(CachingDataStore store, String homeDir, String config) + throws DataStoreException { + // ignore + log("init"); + this.store = store; + } + + @Override + public void close() { + // ignore + log("close"); + } + + @Override + public boolean exists(final DataIdentifier identifier) { + log("exists " + identifier); + return data.containsKey(identifier); + } + + @Override + public Iterator getAllIdentifiers() + throws DataStoreException { + log("getAllIdentifiers"); + return data.keySet().iterator(); + } + + @Override + public InputStream read(final DataIdentifier identifier) + throws DataStoreException { + log("read " + identifier); + return new ByteArrayInputStream(data.get(identifier)); + } + + @Override + public void writeAsync(final DataIdentifier identifier, final File file, + final AsyncUploadCallback callback) throws DataStoreException { + this.write(identifier, file, true, callback); + } + + @Override + public void write(final DataIdentifier identifier, final File file) + throws DataStoreException { + this.write(identifier, file, false, null); + } + + @Override + public long getLastModified(final DataIdentifier identifier) + throws DataStoreException { + log("getLastModified " + identifier); + return timeMap.get(identifier); + } + + @Override + public void deleteRecord(final DataIdentifier identifier) + throws DataStoreException { + timeMap.remove(identifier); + data.remove(identifier); + } + + @Override + public Set deleteAllOlderThan(final long min) throws DataStoreException { + log("deleteAllOlderThan " + min); + Set tobeDeleted = new HashSet(); + for (Map.Entry entry : timeMap.entrySet()) { + DataIdentifier identifier = entry.getKey(); + long timestamp = entry.getValue(); + if (timestamp < min && 
!store.isInUse(identifier) + && store.confirmDelete(identifier)) { + store.deleteFromCache(identifier); + tobeDeleted.add(identifier); + } + } + for (DataIdentifier identifier : tobeDeleted) { + timeMap.remove(identifier); + data.remove(identifier); + } + return tobeDeleted; + } + + @Override + public long getLength(final DataIdentifier identifier) + throws DataStoreException { + try { + return data.get(identifier).length; + } catch (Exception e) { + throw new DataStoreException(e); + } + } + + @Override + public boolean exists(final DataIdentifier identifier, final boolean touch) + throws DataStoreException { + boolean retVal = data.containsKey(identifier); + if (retVal && touch) { + timeMap.put(identifier, System.currentTimeMillis()); + } + return retVal; + } + + @Override + public void touch(DataIdentifier identifier, long minModifiedDate) { + timeMap.put(identifier, System.currentTimeMillis()); + } + + @Override + public void touchAsync(DataIdentifier identifier, long minModifiedDate, + AsyncTouchCallback callback) { + timeMap.put(identifier, System.currentTimeMillis()); + callback.onSuccess(new AsyncTouchResult(identifier)); + } + + private void write(final DataIdentifier identifier, final File file, + final boolean async, final AsyncUploadCallback callback) + throws DataStoreException { + log("write " + identifier + " " + file.length()); + byte[] buffer = new byte[(int) file.length()]; + try { + if (async && callback == null) { + throw new IllegalArgumentException( + "callback parameter cannot be null"); + } + DataInputStream din = new DataInputStream(new FileInputStream(file)); + din.readFully(buffer); + din.close(); + data.put(identifier, buffer); + timeMap.put(identifier, System.currentTimeMillis()); + } catch (IOException e) { + if (async) { + callback.onAbort(new AsyncUploadResult(identifier, file)); + } + throw new DataStoreException(e); + } + if (async) { + callback.onSuccess(new AsyncUploadResult(identifier, file)); + } + } + + /** + * Properties used 
to configure the backend. If provided explicitly before + init is invoked then these take precedence + * + * @param properties to configure the in-memory backend + */ + public void setProperties(Properties properties) { + this.properties = properties; + } + + /** + * Log a message if logging is enabled. + * + * @param message + * the message + */ + private void log(final String message) { + // System.out.println(message); + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryDataStore.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryDataStore.java new file mode 100644 index 00000000000..c6ad141abe2 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/InMemoryDataStore.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.util.Properties; + +/** + * A caching data store that uses the in-memory backend. 
+ */ +public class InMemoryDataStore extends CachingDataStore { + + private Properties properties; + + @Override + protected Backend createBackend() { + InMemoryBackend backend = new InMemoryBackend(); + if (properties != null) { + backend.setProperties(properties); + } + return backend; + } + + @Override + protected String getMarkerFile() { + return "mem.init.done"; + } + + /** + * Properties required to configure the in-memory backend + */ + public void setProperties(Properties properties) { + this.properties = properties; + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/RandomInputStream.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/RandomInputStream.java new file mode 100644 index 00000000000..58ac940ab0f --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/RandomInputStream.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * An input stream that returns pseudo-random bytes. 
+ */ +public class RandomInputStream extends InputStream { + + private static final long MUL = 0x5DEECE66DL; + private static final long ADD = 0xBL; + private static final long MASK = (1L << 48) - 1; + private static final int DEFAULT_MAX_READ_BLOCK_SIZE = 15; + + private final long initialSeed; + private final long len; + private long markedState; + private long pos; + private long markedPos; + private long state; + private int maxReadBlockSize; + + public String toString() { + return "new RandomInputStream(" + initialSeed + ", " + len + ")"; + } + + public RandomInputStream(long seed, long len) { + this(seed, len, DEFAULT_MAX_READ_BLOCK_SIZE); + } + + public static void compareStreams(InputStream a, InputStream b) throws IOException { + a = new BufferedInputStream(a); + b = new BufferedInputStream(b); + long pos = 0; + while (true) { + int x = a.read(); + int y = b.read(); + if (x == -1 || y == -1) { + if (x == y) { + break; + } + } + if (x != y) { + throw new IOException("Incorrect byte at position " + pos + ": x=" + x + " y=" + y); + } + } + } + + public RandomInputStream(long seed, long len, int maxReadBlockSize) { + this.initialSeed = seed; + this.len = len; + this.maxReadBlockSize = maxReadBlockSize; + setSeed(seed); + reset(); + } + + public long skip(long n) { + n = getReadBlock(n); + if (n == 0) { + return -1; + } + pos += n; + return n; + } + + private int getReadBlock(long n) { + if (n > (len - pos)) { + n = (len - pos); + } + if (n > maxReadBlockSize) { + n = maxReadBlockSize; + } else if (n < 0) { + n = 0; + } + return (int) n; + } + + public int read(byte[] b, int off, int len) { + if (pos >= this.len) { + return -1; + } + len = getReadBlock(len); + if (len == 0) { + return -1; + } + for (int i = 0; i < len; i++) { + b[off + i] = (byte) (next() & 255); + } + pos += len; + return len; + } + + public int read(byte[] b) { + return read(b, 0, b.length); + } + + public void close() { + pos = len; + } + + private void setSeed(long seed) { + markedState = 
(seed ^ MUL) & MASK; + } + + private int next() { + state = (state * MUL + ADD) & MASK; + return (int) (state >>> (48 - 32)); + } + + public void reset() { + pos = markedPos; + state = markedState; + } + + public int read() { + if (pos >= len) { + return -1; + } + pos++; + return next() & 255; + } + + public boolean markSupported() { + return true; + } + + public void mark(int readlimit) { + markedPos = pos; + markedState = state; + } + +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDS.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDS.java new file mode 100644 index 00000000000..0051e61fe51 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDS.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.io.FileOutputStream; +import java.util.Properties; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestCachingFDS extends TestFileDataStore { + + protected static final Logger LOG = LoggerFactory.getLogger(TestCachingFDS.class); + + private static final String PENDIND_UPLOAD_FILE = "async-pending-uploads.ser"; + + private static final String TO_BE_DELETED_UPLOAD_FILE = "async-tobedeleted-uploads.ser"; + + protected DataStore createDataStore() throws RepositoryException { + CachingFDS cacheFDS = new CachingFDS(); + Properties props = loadProperties("/fs.properties"); + String pathValue = props.getProperty(FSBackend.FS_BACKEND_PATH); + if (pathValue != null && !"".equals(pathValue.trim())) { + fsPath = pathValue + "/cachingFds" + "-" + + String.valueOf(randomGen.nextInt(100000)) + "-" + + String.valueOf(randomGen.nextInt(100000)); + } else { + fsPath = dataStoreDir + "/cachingFds"; + } + props.setProperty(FSBackend.FS_BACKEND_PATH, fsPath); + LOG.info("fsBackendPath [{}] set.", fsPath); + cacheFDS.setProperties(props); + cacheFDS.setSecret("12345"); + // disable asynchronous writing in testing. + cacheFDS.setAsyncUploadLimit(0); + cacheFDS.init(dataStoreDir); + return cacheFDS; + } + + /** + * Test robustness of {@link AsyncUploadCache} corruption. 
+ */ + public void testAsyncUploadCacheCorruption() { + try { + ds = createDataStore(); + File pendingUploads = new File(dataStoreDir + "/" + + PENDIND_UPLOAD_FILE); + FileOutputStream fos = new FileOutputStream(pendingUploads); + IOUtils.write("garbage-data", fos); + fos.close(); + + File tobeDeletedFile = new File(dataStoreDir + "/" + + TO_BE_DELETED_UPLOAD_FILE); + fos = new FileOutputStream(tobeDeletedFile); + IOUtils.write("garbage-data", fos); + fos.close(); + ds.close(); + + doAddRecordTest(); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDSCacheOff.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDSCacheOff.java new file mode 100644 index 00000000000..1869fe73a2e --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCachingFDSCacheOff.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.util.Properties; + +import javax.jcr.RepositoryException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TestCachingFDSCacheOff extends TestFileDataStore { + + protected static final Logger LOG = LoggerFactory.getLogger(TestCachingFDS.class); + + protected DataStore createDataStore() throws RepositoryException { + CachingFDS cacheFDS = new CachingFDS(); + Properties props = loadProperties("/fs.properties"); + String pathValue = props.getProperty(FSBackend.FS_BACKEND_PATH); + if (pathValue != null && !"".equals(pathValue.trim())) { + fsPath = pathValue + "/cachingFds" + "-" + + String.valueOf(randomGen.nextInt(100000)) + "-" + + String.valueOf(randomGen.nextInt(100000)); + } else { + fsPath = dataStoreDir + "/cachingFDS"; + } + props.setProperty(FSBackend.FS_BACKEND_PATH, fsPath); + cacheFDS.setProperties(props); + cacheFDS.setSecret("12345"); + // disable asynchronous writing in testing. + cacheFDS.setAsyncUploadLimit(0); + cacheFDS.setCacheSize(0); + cacheFDS.init(dataStoreDir); + return cacheFDS; + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCaseBase.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCaseBase.java new file mode 100644 index 00000000000..7242f2ee187 --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestCaseBase.java @@ -0,0 +1,681 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; +import java.util.Random; + +import javax.jcr.RepositoryException; + +import junit.framework.TestCase; + +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test base class which covers all scenarios. + */ +public abstract class TestCaseBase extends TestCase { + + /** + * Logger + */ + protected static final Logger LOG = LoggerFactory.getLogger(TestCaseBase.class); + + /** + * temp directory + */ + private static final String TEST_DIR = "target/temp"; + + /** + * Constant describing aws properties file path. + */ + public static final String CONFIG = "config"; + + /** + * length of record to be added + */ + protected int dataLength = 123456; + + /** + * datastore directory path + */ + protected String dataStoreDir; + + protected DataStore ds; + + /** + * Random number generator to populate data + */ + protected Random randomGen = new Random(); + + /** + * Delete temporary directory. 
+ */ + @Override + protected void setUp() throws Exception { + dataStoreDir = TEST_DIR + "-" + String.valueOf(randomGen.nextInt(9999)) + + "-" + String.valueOf(randomGen.nextInt(9999)); + // delete directory if it exists + File directory = new File(dataStoreDir); + if (directory.exists()) { + boolean delSuccessFul = FileUtils.deleteQuietly(directory); + int retry = 2, count = 0; + while (!delSuccessFul && count <= retry) { + // try once more + delSuccessFul = FileUtils.deleteQuietly(new File(dataStoreDir)); + count++; + } + LOG.info("setup : directory [" + dataStoreDir + "] deleted [" + + delSuccessFul + "]"); + } + } + + @Override + protected void tearDown() { + boolean delSuccessFul = FileUtils.deleteQuietly(new File(dataStoreDir)); + int retry = 2, count = 0; + while (!delSuccessFul && count <= retry) { + // try once more + delSuccessFul = FileUtils.deleteQuietly(new File(dataStoreDir)); + count++; + } + LOG.info("tearDown : directory [" + dataStoreDir + "] deleted [" + + delSuccessFul + "]"); + } + /** + * Testcase to validate {@link DataStore#addRecord(InputStream)} API. + */ + public void testAddRecord() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#addRecord, testDir=" + dataStoreDir); + doAddRecordTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#addRecord finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate {@link DataStore#getRecord(DataIdentifier)} API. 
+ */ + public void testGetRecord() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testGetRecord, testDir=" + dataStoreDir); + doGetRecordTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testGetRecord finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + } + } + + /** + * Testcase to validate {@link DataStore#getAllIdentifiers()} API. + */ + public void testGetAllIdentifiers() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testGetAllIdentifiers, testDir=" + dataStoreDir); + doGetAllIdentifiersTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testGetAllIdentifiers finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate {@link DataStore#updateModifiedDateOnAccess(long)} + * API. + */ + public void testUpdateLastModifiedOnAccess() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testUpdateLastModifiedOnAccess, testDir=" + dataStoreDir); + doUpdateLastModifiedOnAccessTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testUpdateLastModifiedOnAccess finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + } + } + + /** + * Testcase to validate + * {@link MultiDataStoreAware#deleteRecord(DataIdentifier)}.API. 
+ */ + public void testDeleteRecord() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testDeleteRecord, testDir=" + dataStoreDir); + doDeleteRecordTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testDeleteRecord finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate {@link DataStore#deleteAllOlderThan(long)} API. + */ + public void testDeleteAllOlderThan() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testDeleteAllOlderThan, testDir=" + dataStoreDir); + doDeleteAllOlderThan(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testDeleteAllOlderThan finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate {@link DataStore#getRecordFromReference(String)} + */ + public void testReference() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testReference, testDir=" + dataStoreDir); + doReferenceTest(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testReference finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate mixed scenario use of {@link DataStore}. 
+ */ + public void testSingleThread() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testSingleThread, testDir=" + dataStoreDir); + doTestSingleThread(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testSingleThread finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + } + + /** + * Testcase to validate mixed scenario use of {@link DataStore} in + * multi-threaded concurrent environment. + */ + public void testMultiThreaded() { + try { + long start = System.currentTimeMillis(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testMultiThreaded, testDir=" + dataStoreDir); + doTestMultiThreaded(); + LOG.info("Testcase: " + this.getClass().getName() + + "#testMultiThreaded finished, time taken = [" + + (System.currentTimeMillis() - start) + "]ms"); + } catch (Exception e) { + LOG.error("error:", e); + fail(e.getMessage()); + } + + } + + protected abstract DataStore createDataStore() throws RepositoryException ; + + /** + * Test {@link DataStore#addRecord(InputStream)} and assert length of added + * record. + */ + protected void doAddRecordTest() throws Exception { + ds = createDataStore(); + byte[] data = new byte[dataLength]; + randomGen.nextBytes(data); + DataRecord rec = ds.addRecord(new ByteArrayInputStream(data)); + assertEquals(data.length, rec.getLength()); + assertRecord(data, rec); + ds.close(); + } + + /** + * Test {@link DataStore#getRecord(DataIdentifier)} and assert length and + * inputstream. 
+ */ + protected void doGetRecordTest() throws Exception { + ds = createDataStore(); + byte[] data = new byte[dataLength]; + randomGen.nextBytes(data); + DataRecord rec = ds.addRecord(new ByteArrayInputStream(data)); + rec = ds.getRecord(rec.getIdentifier()); + assertEquals(data.length, rec.getLength()); + assertRecord(data, rec); + ds.close(); + } + + /** + * Test {@link MultiDataStoreAware#deleteRecord(DataIdentifier)}. + */ + protected void doDeleteRecordTest() throws Exception { + ds = createDataStore(); + Random random = randomGen; + byte[] data1 = new byte[dataLength]; + random.nextBytes(data1); + DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data1)); + + byte[] data2 = new byte[dataLength]; + random.nextBytes(data2); + DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data2)); + + byte[] data3 = new byte[dataLength]; + random.nextBytes(data3); + DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data3)); + + ((MultiDataStoreAware)ds).deleteRecord(rec2.getIdentifier()); + + assertNull("rec2 should be null", + ds.getRecordIfStored(rec2.getIdentifier())); + assertEquals(new ByteArrayInputStream(data1), + ds.getRecord(rec1.getIdentifier()).getStream()); + assertEquals(new ByteArrayInputStream(data3), + ds.getRecord(rec3.getIdentifier()).getStream()); + ds.close(); + } + + /** + * Test {@link DataStore#getAllIdentifiers()} and asserts all identifiers + * are returned. 
+ */ + protected void doGetAllIdentifiersTest() throws Exception { + ds = createDataStore(); + List list = new ArrayList(); + Random random = randomGen; + byte[] data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec = ds.addRecord(new ByteArrayInputStream(data)); + list.add(rec.getIdentifier()); + + data = new byte[dataLength]; + random.nextBytes(data); + rec = ds.addRecord(new ByteArrayInputStream(data)); + list.add(rec.getIdentifier()); + + data = new byte[dataLength]; + random.nextBytes(data); + rec = ds.addRecord(new ByteArrayInputStream(data)); + list.add(rec.getIdentifier()); + + Iterator itr = ds.getAllIdentifiers(); + while (itr.hasNext()) { + assertTrue("record found on list", list.remove(itr.next())); + } + assertEquals(0, list.size()); + ds.close(); + } + + /** + * Asserts that timestamp of all records accessed after + * {@link DataStore#updateModifiedDateOnAccess(long)} invocation. + */ + protected void doUpdateLastModifiedOnAccessTest() throws Exception { + ds = createDataStore(); + Random random = randomGen; + byte[] data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data)); + + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data)); + LOG.debug("rec2 timestamp=" + rec2.getLastModified()); + + // sleep for some time to ensure that async upload completes in backend. + sleep(6000); + long updateTime = System.currentTimeMillis(); + LOG.debug("updateTime=" + updateTime); + ds.updateModifiedDateOnAccess(updateTime); + + // sleep to workaround System.currentTimeMillis granularity. 
+ sleep(3000); + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data)); + + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data)); + + rec1 = ds.getRecord(rec1.getIdentifier()); + + assertEquals("rec1 touched", true, rec1.getLastModified() > updateTime); + LOG.debug("rec2 timestamp=" + rec2.getLastModified()); + assertEquals("rec2 not touched", true, + rec2.getLastModified() < updateTime); + assertEquals("rec3 touched", true, rec3.getLastModified() > updateTime); + assertEquals("rec4 touched", true, rec4.getLastModified() > updateTime); + ds.close(); + + } + + /** + * Asserts that {@link DataStore#deleteAllOlderThan(long)} only deleted + * records older than argument passed. + */ + protected void doDeleteAllOlderThan() throws Exception { + ds = createDataStore(); + Random random = randomGen; + byte[] data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data)); + + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data)); + + // sleep for some time to ensure that async upload completes in backend. + sleep(10000); + long updateTime = System.currentTimeMillis(); + ds.updateModifiedDateOnAccess(updateTime); + + // sleep to workaround System.currentTimeMillis granularity. 
+ sleep(3000); + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data)); + + data = new byte[dataLength]; + random.nextBytes(data); + DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data)); + + rec1 = ds.getRecord(rec1.getIdentifier()); + ds.clearInUse(); + assertEquals("only rec2 should be deleted", 1, + ds.deleteAllOlderThan(updateTime)); + assertNull("rec2 should be null", + ds.getRecordIfStored(rec2.getIdentifier())); + + Iterator itr = ds.getAllIdentifiers(); + List list = new ArrayList(); + list.add(rec1.getIdentifier()); + list.add(rec3.getIdentifier()); + list.add(rec4.getIdentifier()); + while (itr.hasNext()) { + assertTrue("record found on list", list.remove(itr.next())); + } + + assertEquals("touched records found", 0, list.size()); + assertEquals("rec1 touched", true, rec1.getLastModified() > updateTime); + assertEquals("rec3 touched", true, rec3.getLastModified() > updateTime); + assertEquals("rec4 touched", true, rec4.getLastModified() > updateTime); + ds.close(); + } + + /** + * Test if record can be accessed via + * {@link DataStore#getRecordFromReference(String)} + */ + protected void doReferenceTest() throws Exception { + ds = createDataStore(); + byte[] data = new byte[dataLength]; + randomGen.nextBytes(data); + String reference; + DataRecord record = ds.addRecord(new ByteArrayInputStream(data)); + reference = record.getReference(); + assertReference(data, reference, ds); + ds.close(); + } + + /** + * Method to validate mixed scenario use of {@link DataStore}. + */ + protected void doTestSingleThread() throws Exception { + ds = createDataStore(); + doTestMultiThreaded(ds, 1); + ds.close(); + } + + /** + * Method to validate mixed scenario use of {@link DataStore} in + * multi-threaded concurrent environment. 
+ */ + protected void doTestMultiThreaded() throws Exception { + ds = createDataStore(); + doTestMultiThreaded(ds, 4); + ds.close(); + } + + /** + * Method to assert record with byte array. + */ + protected void assertRecord(byte[] expected, DataRecord record) + throws DataStoreException, IOException { + InputStream stream = record.getStream(); + try { + for (int i = 0; i < expected.length; i++) { + assertEquals(expected[i] & 0xff, stream.read()); + } + assertEquals(-1, stream.read()); + } finally { + stream.close(); + } + } + + /** + * Method to run {@link TestCaseBase#doTest(DataStore, int)} in multiple + * concurrent threads. + */ + protected void doTestMultiThreaded(final DataStore ds, int threadCount) + throws Exception { + final Exception[] exception = new Exception[1]; + Thread[] threads = new Thread[threadCount]; + for (int i = 0; i < threadCount; i++) { + final int x = i; + Thread t = new Thread() { + public void run() { + try { + doTest(ds, x); + } catch (Exception e) { + exception[0] = e; + } + } + }; + threads[i] = t; + t.start(); + } + for (int i = 0; i < threadCount; i++) { + threads[i].join(); + } + if (exception[0] != null) { + throw exception[0]; + } + } + + /** + * Assert randomly read stream from record. 
+ */ + void doTest(DataStore ds, int offset) throws Exception { + ArrayList list = new ArrayList(); + HashMap map = new HashMap(); + for (int i = 0; i < 10; i++) { + int size = 100000 - (i * 100); + RandomInputStream in = new RandomInputStream(size + offset, size); + DataRecord rec = ds.addRecord(in); + list.add(rec); + map.put(rec, new Integer(size)); + } + Random random = new Random(1); + for (int i = 0; i < list.size(); i++) { + int pos = random.nextInt(list.size()); + DataRecord rec = list.get(pos); + int size = map.get(rec); + rec = ds.getRecord(rec.getIdentifier()); + assertEquals(size, rec.getLength()); + RandomInputStream expected = new RandomInputStream(size + offset, + size); + InputStream in = rec.getStream(); + // Workaround for race condition that can happen with low cache size relative to the test + // read immediately + byte[] buffer = new byte[1]; + in.read(buffer); + in = new SequenceInputStream(new ByteArrayInputStream(buffer), in); + + if (random.nextBoolean()) { + in = readInputStreamRandomly(in, random); + } + assertEquals(expected, in); + } + } + + InputStream readInputStreamRandomly(InputStream in, Random random) + throws IOException { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + byte[] buffer = new byte[8000]; + while (true) { + if (random.nextBoolean()) { + int x = in.read(); + if (x < 0) { + break; + } + out.write(x); + } else { + if (random.nextBoolean()) { + int l = in.read(buffer); + if (l < 0) { + break; + } + out.write(buffer, 0, l); + } else { + int offset = random.nextInt(buffer.length / 2); + int len = random.nextInt(buffer.length / 2); + int l = in.read(buffer, offset, len); + if (l < 0) { + break; + } + out.write(buffer, offset, l); + } + } + } + in.close(); + return new ByteArrayInputStream(out.toByteArray()); + } + + /** + * Assert two inputstream + */ + protected void assertEquals(InputStream a, InputStream b) + throws IOException { + try { + assertTrue("binary not equal", + 
org.apache.commons.io.IOUtils.contentEquals(a, b)); + } finally { + try { + a.close(); + } catch (Exception ignore) { + } + try { + b.close(); + } catch (Exception ignore) { + } + } + } + + /** + * Assert inputstream read from reference. + */ + protected void assertReference(byte[] expected, String reference, + DataStore store) throws Exception { + DataRecord record = store.getRecordFromReference(reference); + assertNotNull(record); + assertEquals(expected.length, record.getLength()); + + InputStream stream = record.getStream(); + try { + assertTrue("binary not equal", + org.apache.commons.io.IOUtils.contentEquals( + new ByteArrayInputStream(expected), stream)); + } finally { + stream.close(); + } + } + + /** + * Utility method to stop execution for duration time. + * + * @param duration + * time in milli seconds + */ + protected void sleep(long duration) { + long expected = System.currentTimeMillis() + duration; + while (System.currentTimeMillis() < expected) { + try { + Thread.sleep(1); + } catch (InterruptedException ie) { + + } + } + } + + /** + * Return {@link Properties} from class resource. Return empty + * {@link Properties} if not found. + */ + protected Properties loadProperties(String resource) { + Properties configProp = new Properties(); + try { + configProp.load(this.getClass().getResourceAsStream(resource)); + } catch (Exception ignore) { + + } + return configProp; + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestFileDataStore.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestFileDataStore.java new file mode 100644 index 00000000000..e7f78735ebd --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestFileDataStore.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.File; +import java.util.Properties; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test cases to test {@link FileDataStore} + */ +public class TestFileDataStore extends TestCaseBase { + + protected static final Logger LOG = LoggerFactory.getLogger(TestFileDataStore.class); + + String fsPath; + + @Override + protected DataStore createDataStore() throws RepositoryException { + FileDataStore fds = new FileDataStore(); + Properties props = loadProperties("/fs.properties"); + String pathValue = props.getProperty(FSBackend.FS_BACKEND_PATH); + if (pathValue != null && !"".equals(pathValue.trim())) { + fsPath = pathValue + "/fds" + "-" + + String.valueOf(randomGen.nextInt(100000)) + "-" + + String.valueOf(randomGen.nextInt(100000)); + } else { + fsPath = dataStoreDir + "/repository/datastore"; + } + LOG.info("path [{}] set.", fsPath); + fds.setPath(fsPath); + fds.init(dataStoreDir); + return fds; + } + + @Override + protected void tearDown() { + LOG.info("cleaning fsPath [{}]", fsPath); + File f = new File(fsPath); + try { + for (int i = 0; i < 4 && f.exists(); i++) { + FileUtils.deleteQuietly(f); + Thread.sleep(2000); + } + } catch (Exception ignore) { + + } + super.tearDown(); + } +} diff --git 
a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDs.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDs.java new file mode 100644 index 00000000000..b42db48583d --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDs.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import javax.jcr.RepositoryException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test {@link CachingDataStore} with InMemoryBackend and local cache on. 
+ */ +public class TestInMemDs extends TestCaseBase { + + protected static final Logger LOG = LoggerFactory.getLogger(TestInMemDs.class); + + @Override + protected DataStore createDataStore() throws RepositoryException { + InMemoryDataStore inMemDS = new InMemoryDataStore(); + inMemDS.setProperties(null); + inMemDS.setSecret("12345"); + inMemDS.init(dataStoreDir); + return inMemDS; + } + + +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDsCacheOff.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDsCacheOff.java new file mode 100644 index 00000000000..97a75742d0c --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestInMemDsCacheOff.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import javax.jcr.RepositoryException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test {@link CachingDataStore} with InMemoryBackend and local cache on. 
+ */ + +public class TestInMemDsCacheOff extends TestCaseBase { + + protected static final Logger LOG = LoggerFactory.getLogger(TestInMemDsCacheOff.class); + @Override + protected DataStore createDataStore() throws RepositoryException { + InMemoryDataStore inMemDS = new InMemoryDataStore(); + inMemDS.setProperties(null); + inMemDS.setSecret("12345"); + inMemDS.setCacheSize(0); + inMemDS.init(dataStoreDir); + return inMemDS; + } +} diff --git a/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestLocalCache.java b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestLocalCache.java new file mode 100644 index 00000000000..d20f0085fcb --- /dev/null +++ b/oak-blob/src/test/java/org/apache/jackrabbit/oak/spi/blob/data/TestLocalCache.java @@ -0,0 +1,404 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.oak.spi.blob.data; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import junit.framework.TestCase; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.oak.spi.blob.data.util.NamedThreadFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Testcase to test local cache. + */ +public class TestLocalCache extends TestCase { + + private static final String CACHE_DIR = "target/cache"; + + private static final String TEMP_DIR = "target/temp"; + + private static final String TARGET_DIR = "target"; + + protected String cacheDirPath; + + protected String tempDirPath; + + /** + * Random number generator to populate data + */ + protected Random randomGen = new Random(); + + private static final Logger LOG = LoggerFactory.getLogger(TestLocalCache.class); + + @Override + protected void setUp() { + try { + cacheDirPath = CACHE_DIR + "-" + + String.valueOf(randomGen.nextInt(9999)) + "-" + + String.valueOf(randomGen.nextInt(9999)); + File cachedir = new File(cacheDirPath); + for (int i = 0; i < 4 && cachedir.exists(); i++) { + FileUtils.deleteQuietly(cachedir); + Thread.sleep(1000); + } + cachedir.mkdirs(); + + tempDirPath = TEMP_DIR + "-" + + String.valueOf(randomGen.nextInt(9999)) + "-" + + String.valueOf(randomGen.nextInt(9999)); + File tempdir = new File(tempDirPath); + for (int i = 0; i < 4 && tempdir.exists(); i++) { + FileUtils.deleteQuietly(tempdir); + Thread.sleep(1000); + } + tempdir.mkdirs(); + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + + @Override + protected void tearDown() throws Exception { + File cachedir = new 
File(cacheDirPath); + for (int i = 0; i < 4 && cachedir.exists(); i++) { + FileUtils.deleteQuietly(cachedir); + Thread.sleep(1000); + } + + File tempdir = new File(tempDirPath); + for (int i = 0; i < 4 && tempdir.exists(); i++) { + FileUtils.deleteQuietly(tempdir); + Thread.sleep(1000); + } + } + + /** + * Test to validate store retrieve in cache. + */ + public void testStoreRetrieve() { + try { + AsyncUploadCache pendingFiles = new AsyncUploadCache(); + pendingFiles.init(tempDirPath, cacheDirPath, 100); + pendingFiles.reset(); + LocalCache cache = new LocalCache(cacheDirPath, tempDirPath, 400, + 0.95, 0.70, pendingFiles); + Random random = new Random(12345); + byte[] data = new byte[100]; + Map byteMap = new HashMap(); + random.nextBytes(data); + byteMap.put("a1", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a2", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a3", data); + + cache.store("a1", new ByteArrayInputStream(byteMap.get("a1"))); + cache.store("a2", new ByteArrayInputStream(byteMap.get("a2"))); + cache.store("a3", new ByteArrayInputStream(byteMap.get("a3"))); + InputStream result = cache.getIfStored("a1"); + assertEquals(new ByteArrayInputStream(byteMap.get("a1")), result); + IOUtils.closeQuietly(result); + result = cache.getIfStored("a2"); + assertEquals(new ByteArrayInputStream(byteMap.get("a2")), result); + IOUtils.closeQuietly(result); + result = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), result); + IOUtils.closeQuietly(result); + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + + /** + * Test to verify cache's purging if cache current size exceeds + * cachePurgeTrigFactor * size. 
+ */ + public void testAutoPurge() { + try { + AsyncUploadCache pendingFiles = new AsyncUploadCache(); + pendingFiles.init(tempDirPath, cacheDirPath, 100); + pendingFiles.reset(); + LocalCache cache = new LocalCache(cacheDirPath, tempDirPath, 400, + 0.95, 0.70, pendingFiles); + Random random = new Random(12345); + byte[] data = new byte[100]; + Map byteMap = new HashMap(); + random.nextBytes(data); + byteMap.put("a1", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a2", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a3", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a4", data); + + cache.store("a1", new ByteArrayInputStream(byteMap.get("a1"))); + cache.store("a2", new ByteArrayInputStream(byteMap.get("a2"))); + cache.store("a3", new ByteArrayInputStream(byteMap.get("a3"))); + + InputStream result = cache.getIfStored("a1"); + assertEquals(new ByteArrayInputStream(byteMap.get("a1")), result); + IOUtils.closeQuietly(result); + result = cache.getIfStored("a2"); + assertEquals(new ByteArrayInputStream(byteMap.get("a2")), result); + IOUtils.closeQuietly(result); + result = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), result); + IOUtils.closeQuietly(result); + + data = new byte[90]; + random.nextBytes(data); + byteMap.put("a4", data); + // storing a4 should purge cache + cache.store("a4", new ByteArrayInputStream(byteMap.get("a4"))); + do { + Thread.sleep(1000); + } while (cache.isInPurgeMode()); + + result = cache.getIfStored("a1"); + assertNull("a1 should be null", result); + IOUtils.closeQuietly(result); + + result = cache.getIfStored("a2"); + assertNull("a2 should be null", result); + IOUtils.closeQuietly(result); + + result = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), result); + IOUtils.closeQuietly(result); + + result = cache.getIfStored("a4"); + assertEquals(new ByteArrayInputStream(byteMap.get("a4")), 
result); + IOUtils.closeQuietly(result); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a5", data); + cache.store("a5", new ByteArrayInputStream(byteMap.get("a5"))); + result = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), result); + IOUtils.closeQuietly(result); + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + + /** + * Test to verify cache's purging if cache current size exceeds + * cachePurgeTrigFactor * size. + */ + public void testAutoPurgeWithPendingUpload() { + try { + AsyncUploadCache pendingFiles = new AsyncUploadCache(); + pendingFiles.init(tempDirPath, cacheDirPath, 100); + pendingFiles.reset(); + LocalCache cache = new LocalCache(cacheDirPath, tempDirPath, 400, + 0.95, 0.70, pendingFiles); + Random random = new Random(12345); + byte[] data = new byte[125]; + Map byteMap = new HashMap(); + random.nextBytes(data); + byteMap.put("a1", data); + + data = new byte[125]; + random.nextBytes(data); + byteMap.put("a2", data); + + data = new byte[125]; + random.nextBytes(data); + byteMap.put("a3", data); + + data = new byte[100]; + random.nextBytes(data); + byteMap.put("a4", data); + File tempDir = new File(tempDirPath); + File f = File.createTempFile("test", "tmp", tempDir); + FileOutputStream fos = new FileOutputStream(f); + fos.write(byteMap.get("a1")); + fos.close(); + AsyncUploadCacheResult result = cache.store("a1", f, true); + assertTrue("should be able to add to pending upload", + result.canAsyncUpload()); + + f = File.createTempFile("test", "tmp", tempDir); + fos = new FileOutputStream(f); + fos.write(byteMap.get("a2")); + fos.close(); + result = cache.store("a2", f, true); + assertTrue("should be able to add to pending upload", + result.canAsyncUpload()); + + f = File.createTempFile("test", "tmp", tempDir); + fos = new FileOutputStream(f); + fos.write(byteMap.get("a3")); + fos.close(); + result = cache.store("a3", f, true); + assertTrue("should be able to add to pending 
upload", + result.canAsyncUpload()); + + InputStream inp = cache.getIfStored("a1"); + assertEquals(new ByteArrayInputStream(byteMap.get("a1")), inp); + IOUtils.closeQuietly(inp); + inp = cache.getIfStored("a2"); + assertEquals(new ByteArrayInputStream(byteMap.get("a2")), inp); + IOUtils.closeQuietly(inp); + inp = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), inp); + IOUtils.closeQuietly(inp); + + data = new byte[90]; + random.nextBytes(data); + byteMap.put("a4", data); + + f = File.createTempFile("test", "tmp", tempDir); + fos = new FileOutputStream(f); + fos.write(byteMap.get("a4")); + fos.close(); + + result = cache.store("a4", f, true); + assertFalse("should not be able to add to pending upload", + result.canAsyncUpload()); + Thread.sleep(1000); + + inp = cache.getIfStored("a1"); + assertEquals(new ByteArrayInputStream(byteMap.get("a1")), inp); + IOUtils.closeQuietly(inp); + inp = cache.getIfStored("a2"); + assertEquals(new ByteArrayInputStream(byteMap.get("a2")), inp); + IOUtils.closeQuietly(inp); + inp = cache.getIfStored("a3"); + assertEquals(new ByteArrayInputStream(byteMap.get("a3")), inp); + IOUtils.closeQuietly(inp); + inp = cache.getIfStored("a4"); + assertNull("a4 should be null", inp); + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + + /** + * Test concurrent {@link LocalCache} initialization with storing + * {@link LocalCache} + */ + public void testConcurrentInitWithStore() { + try { + AsyncUploadCache pendingFiles = new AsyncUploadCache(); + pendingFiles.init(tempDirPath, cacheDirPath, 100); + pendingFiles.reset(); + LocalCache cache = new LocalCache(cacheDirPath, tempDirPath, + 10000000, 0.95, 0.70, pendingFiles); + Random random = new Random(12345); + int fileUploads = 1000; + Map byteMap = new HashMap( + fileUploads); + byte[] data; + for (int i = 0; i < fileUploads; i++) { + data = new byte[100]; + random.nextBytes(data); + String key = "a" + i; + byteMap.put(key, data); + 
cache.store(key, new ByteArrayInputStream(byteMap.get(key))); + } + cache.close(); + + ExecutorService executor = Executors.newFixedThreadPool(10, + new NamedThreadFactory("localcache-store-worker")); + cache = new LocalCache(cacheDirPath, tempDirPath, 10000000, 0.95, + 0.70, pendingFiles); + executor.execute(new StoreWorker(cache, byteMap)); + executor.shutdown(); + while (!executor.awaitTermination(15, TimeUnit.SECONDS)) { + } + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + + private class StoreWorker implements Runnable { + Map byteMap; + + LocalCache cache; + + Random random; + + private StoreWorker(LocalCache cache, Map byteMap) { + this.byteMap = byteMap; + this.cache = cache; + random = new Random(byteMap.size()); + } + + public void run() { + try { + for (int i = 0; i < 100; i++) { + String key = "a" + random.nextInt(byteMap.size()); + LOG.debug("key=" + key); + cache.store(key, new ByteArrayInputStream(byteMap.get(key))); + } + } catch (Exception e) { + LOG.error("error:", e); + fail(); + } + } + } + + /** + * Assert two inputstream + */ + protected void assertEquals(InputStream a, InputStream b) + throws IOException { + while (true) { + int ai = a.read(); + int bi = b.read(); + assertEquals(ai, bi); + if (ai < 0) { + break; + } + } + IOUtils.closeQuietly(a); + IOUtils.closeQuietly(b); + } + +} diff --git a/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/datastore/DataStoreTextWriterTest.java b/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/datastore/DataStoreTextWriterTest.java index a20422e5173..899cf157b38 100644 --- a/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/datastore/DataStoreTextWriterTest.java +++ b/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/datastore/DataStoreTextWriterTest.java @@ -23,8 +23,8 @@ import java.io.File; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataRecord; -import 
org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; diff --git a/oak-it-osgi/pom.xml b/oak-it-osgi/pom.xml index 4161e48ac8a..ae973c61e6a 100644 --- a/oak-it-osgi/pom.xml +++ b/oak-it-osgi/pom.xml @@ -153,6 +153,12 @@ ${project.version} test + + org.apache.jackrabbit + oak-blob-cloud + ${project.version} + test + org.apache.jackrabbit oak-blob-cloud-azure diff --git a/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.jcr.osgi.RepositoryManager.cfg b/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.jcr.osgi.RepositoryManager.cfg index 9de8f50d9c2..f8181f92fb2 100644 --- a/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.jcr.osgi.RepositoryManager.cfg +++ b/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.jcr.osgi.RepositoryManager.cfg @@ -1,15 +1,15 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#Empty config to trigger default setup +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#Empty config to trigger default setup diff --git a/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.cfg b/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.cfg index d05269419a0..ccc6a4fde9b 100644 --- a/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.cfg +++ b/oak-it-osgi/src/test/config/org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.cfg @@ -1,16 +1,16 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -name=Oak -repository.home=target/tarmk +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name=Oak +repository.home=target/tarmk diff --git a/oak-it-osgi/test-bundles.xml b/oak-it-osgi/test-bundles.xml index d43872f4eb9..bacc80251fe 100644 --- a/oak-it-osgi/test-bundles.xml +++ b/oak-it-osgi/test-bundles.xml @@ -42,7 +42,6 @@ org.apache.commons:commons-text org.apache.commons:commons-collections4 org.apache.jackrabbit:jackrabbit-jcr-commons - org.apache.jackrabbit:jackrabbit-data org.apache.jackrabbit:oak-api org.apache.jackrabbit:oak-jackrabbit-api org.apache.jackrabbit:oak-shaded-guava diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/AbstractBinaryAccessIT.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/AbstractBinaryAccessIT.java index 15f5c8d87b3..a7080a0289c 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/AbstractBinaryAccessIT.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/AbstractBinaryAccessIT.java @@ -23,7 +23,7 @@ import java.util.Collections; import javax.jcr.Repository; -import org.apache.jackrabbit.core.data.DataStore; +import 
org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.api.blob.BlobAccessProvider; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java index ca26541480e..c4c82bc02ef 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/BinaryAccessDSGCIT.java @@ -51,7 +51,7 @@ import org.apache.jackrabbit.api.binary.BinaryDownload; import org.apache.jackrabbit.api.binary.BinaryDownloadOptions; import org.apache.jackrabbit.api.binary.BinaryUpload; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.AzureDataStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.S3DataStoreFixture; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java index c3747cf1a54..3d806c46f80 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/AzureDataStoreFixture.java @@ -23,8 +23,8 @@ import java.util.Properties; import java.util.UUID; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import 
org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/DataStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/DataStoreFixture.java index a1fc25462e0..e8b3e9afc22 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/DataStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/DataStoreFixture.java @@ -19,9 +19,8 @@ package org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; /** * DataStore fixture for parametrized tests. To be used inside NodeStoreFixture implementations. 
diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/FileDataStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/FileDataStoreFixture.java index a8213efdf7a..ddad9c9d656 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/FileDataStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/FileDataStoreFixture.java @@ -18,8 +18,8 @@ */ package org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.jetbrains.annotations.NotNull; public class FileDataStoreFixture implements DataStoreFixture { diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/S3DataStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/S3DataStoreFixture.java index 8d688e6a16b..16de89bfe84 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/S3DataStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/datastore/S3DataStoreFixture.java @@ -24,8 +24,8 @@ import java.util.Properties; import java.util.UUID; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.s3.S3Backend; import org.apache.jackrabbit.oak.blob.cloud.s3.S3Constants; import org.apache.jackrabbit.oak.blob.cloud.s3.S3BackendHelper; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMemoryNodeStoreFixture.java 
b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMemoryNodeStoreFixture.java index bfbb3f87a13..8379268dfaa 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMemoryNodeStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMemoryNodeStoreFixture.java @@ -25,7 +25,7 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.DataStoreFixture; import org.apache.jackrabbit.oak.jcr.util.ComponentHolder; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java index 1581c381e6c..25c4e7c6d1d 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/DocumentMongoNodeStoreFixture.java @@ -28,7 +28,7 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.DataStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.util.BinaryAccessDSGCFixture; diff --git a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java index 
919a036c6db..e9f0a67a147 100644 --- a/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java +++ b/oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/binary/fixtures/nodestore/SegmentMemoryNodeStoreFixture.java @@ -25,7 +25,7 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.fixtures.datastore.DataStoreFixture; import org.apache.jackrabbit.oak.jcr.binary.util.BinaryAccessDSGCFixture; diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java index 138a9b537a0..e707fece925 100644 --- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java +++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java @@ -43,7 +43,7 @@ import org.apache.commons.io.LineIterator; import org.apache.commons.io.filefilter.IOFileFilter; import org.apache.commons.io.filefilter.RegexFileFilter; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.commons.PerfLogger; import org.apache.jackrabbit.oak.commons.collections.ListUtils; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java index 6f588c771f1..10f8b82b156 100644 --- 
a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java @@ -19,9 +19,9 @@ package org.apache.jackrabbit.oak.plugins.index.lucene; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; import org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneWritesOnSegmentStatsTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneWritesOnSegmentStatsTest.java index 12327b8c1d4..72a9228f48d 100644 --- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneWritesOnSegmentStatsTest.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneWritesOnSegmentStatsTest.java @@ -30,7 +30,7 @@ import java.util.concurrent.ScheduledExecutorService; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.InitialContent; import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.api.ContentRepository; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/AbstractActiveDeletedBlobTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/AbstractActiveDeletedBlobTest.java index 
96fc74f1070..2f572e0805d 100644 --- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/AbstractActiveDeletedBlobTest.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/AbstractActiveDeletedBlobTest.java @@ -26,9 +26,9 @@ import java.util.concurrent.ExecutorService; import org.apache.jackrabbit.JcrConstants; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.Tree; import org.apache.jackrabbit.oak.api.Type; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectionIT.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectionIT.java index 50249f69fb6..55612b629d8 100644 --- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectionIT.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectionIT.java @@ -17,7 +17,7 @@ package org.apache.jackrabbit.oak.plugins.index.lucene.directory; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.InitialContent; import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.api.CommitFailedException; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java 
b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java index 324fde77c79..c1d92092247 100644 --- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java @@ -47,7 +47,7 @@ import ch.qos.logback.classic.Level; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.testing.CIHelper; import org.apache.jackrabbit.oak.commons.collections.ListUtils; import org.apache.jackrabbit.oak.commons.internal.concurrent.ExecutorUtils; diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java index f6f21be130b..d5a78b43fbd 100644 --- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java +++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java @@ -52,7 +52,7 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.io.input.NullInputStream; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java index 182ce736e6b..87aa31be8da 100644 --- 
a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java @@ -30,8 +30,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.io.FileUtils; import org.apache.felix.cm.file.ConfigurationHandler; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.collections.MapUtils; import org.apache.jackrabbit.oak.plugins.blob.BlobStoreStats; diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java index c79b548bc3a..de5cba57d43 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java @@ -21,8 +21,8 @@ import com.azure.storage.blob.BlobContainerClient; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureBlobContainerProvider; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore; diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java index 
9a539cd3cc0..cddfd303831 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/fixture/SegmentTarFixture.java @@ -33,7 +33,7 @@ import com.microsoft.azure.storage.blob.CloudBlobContainer; import com.microsoft.azure.storage.blob.CloudBlobDirectory; import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.api.blob.BlobAccessProvider; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/BlobStoreFixtureProvider.java b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/BlobStoreFixtureProvider.java index 580578157d6..0f1a3d2e0f8 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/BlobStoreFixtureProvider.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/BlobStoreFixtureProvider.java @@ -33,9 +33,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.FilenameUtils; import org.apache.felix.cm.file.ConfigurationHandler; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore; import org.apache.jackrabbit.oak.blob.cloud.s3.S3DataStore; import org.apache.jackrabbit.oak.commons.pio.Closer; diff --git a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DummyDataStore.java 
b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DummyDataStore.java index 4ce0386d231..5b5477b5ca4 100644 --- a/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DummyDataStore.java +++ b/oak-run-commons/src/main/java/org/apache/jackrabbit/oak/run/cli/DummyDataStore.java @@ -21,11 +21,11 @@ import java.io.ByteArrayInputStream; import java.io.InputStream; -import org.apache.jackrabbit.core.data.AbstractDataRecord; -import org.apache.jackrabbit.core.data.AbstractDataStore; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.AbstractDataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.AbstractDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; /** diff --git a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/run/cli/ReadOnlyBlobStoreWrapperTest.java b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/run/cli/ReadOnlyBlobStoreWrapperTest.java index 6dcb58321d3..1d6f19689c5 100644 --- a/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/run/cli/ReadOnlyBlobStoreWrapperTest.java +++ b/oak-run-commons/src/test/java/org/apache/jackrabbit/oak/run/cli/ReadOnlyBlobStoreWrapperTest.java @@ -23,7 +23,7 @@ import java.io.File; import java.io.InputStream; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.junit.Rule; diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCommand.java 
b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCommand.java index c870ce56628..24f6547a564 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCommand.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCommand.java @@ -43,7 +43,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.LineIterator; import org.apache.commons.io.filefilter.FileFilterUtils; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java index 07e6747f81e..5504e8f22b4 100644 --- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java +++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java @@ -46,8 +46,8 @@ import joptsimple.OptionSpecBuilder; import org.apache.commons.io.FileUtils; import org.apache.felix.cm.file.ConfigurationHandler; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore; import org.apache.jackrabbit.oak.blob.cloud.s3.S3DataStore; import org.apache.jackrabbit.oak.commons.pio.Closer; diff --git a/oak-run/src/main/resources/org/apache/jackrabbit/oak/fixture/repository.xml b/oak-run/src/main/resources/org/apache/jackrabbit/oak/fixture/repository.xml index 7b3ae32d8c3..4d04d51d705 100644 --- a/oak-run/src/main/resources/org/apache/jackrabbit/oak/fixture/repository.xml +++ b/oak-run/src/main/resources/org/apache/jackrabbit/oak/fixture/repository.xml @@ -30,14 +30,14 @@ virtual file system where the 
repository stores global state (e.g. registered namespaces, custom node types, etc.) --> - + - + - + - + diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCheckTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCheckTest.java index c52aaa94ba3..897d6ea42ce 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCheckTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCheckTest.java @@ -42,11 +42,10 @@ import java.util.Set; import org.apache.commons.lang3.StringUtils; -import joptsimple.internal.Strings; import org.apache.commons.io.FileUtils; import org.apache.commons.io.filefilter.FileFilterUtils; import org.apache.felix.cm.file.ConfigurationHandler; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils; import org.apache.jackrabbit.oak.blob.cloud.s3.S3Constants; diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandMetadataTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandMetadataTest.java index 5cd5a321be7..e53987acf39 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandMetadataTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandMetadataTest.java @@ -28,8 +28,8 @@ import java.util.UUID; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.commons.FileIOUtils; import org.apache.jackrabbit.oak.plugins.blob.MemoryBlobStoreNodeStore; import 
org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java index dc8d082f555..0a477cdfa02 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/DataStoreCommandTest.java @@ -45,8 +45,8 @@ import joptsimple.OptionException; import org.apache.commons.io.FileUtils; import org.apache.felix.cm.file.ConfigurationHandler; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants; diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/BinaryStorageWithBlobStoreTest.java b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/BinaryStorageWithBlobStoreTest.java index 1f31bb28aec..20a189bb554 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/BinaryStorageWithBlobStoreTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/BinaryStorageWithBlobStoreTest.java @@ -39,7 +39,7 @@ import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.security.user.UserConstants; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.junit.After; import org.junit.Before; import org.junit.Rule; diff --git a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/IndexRegexPropertyWithBinaryExcludedTest.java 
b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/IndexRegexPropertyWithBinaryExcludedTest.java index 984743a36fb..b9f6ba11cf1 100644 --- a/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/IndexRegexPropertyWithBinaryExcludedTest.java +++ b/oak-run/src/test/java/org/apache/jackrabbit/oak/run/query/IndexRegexPropertyWithBinaryExcludedTest.java @@ -49,7 +49,7 @@ import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.security.user.UserConstants; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.junit.After; import org.junit.Before; import org.junit.Rule; diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/BinariesInlineThresholdIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/BinariesInlineThresholdIT.java index 4444055946a..f6f2f13db8f 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/BinariesInlineThresholdIT.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/BinariesInlineThresholdIT.java @@ -30,7 +30,7 @@ import java.io.IOException; import java.util.Random; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.PropertyState; diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java index 868bc2b7383..dff094afbb0 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/ExternalBlobIT.java @@ -38,7 +38,7 @@ import 
org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreServiceTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreServiceTest.java index 8fe7d65910d..621566a2c12 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreServiceTest.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/SegmentNodeStoreServiceTest.java @@ -32,7 +32,7 @@ import java.util.Hashtable; import java.util.List; -import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataRecord; import org.apache.jackrabbit.oak.plugins.blob.BlobGCMBean; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/StandbySegmentBlobTestIT.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/StandbySegmentBlobTestIT.java index 2fad0ea74bf..6576e43f735 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/StandbySegmentBlobTestIT.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/StandbySegmentBlobTestIT.java @@ -25,7 +25,7 @@ import java.io.File; import org.apache.commons.io.input.NullInputStream; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.api.PropertyState; import org.apache.jackrabbit.oak.api.Type; diff 
--git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/client/RemoteBlobProcessorTest.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/client/RemoteBlobProcessorTest.java index 0d316abd706..c83821f0f85 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/client/RemoteBlobProcessorTest.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/standby/client/RemoteBlobProcessorTest.java @@ -22,7 +22,7 @@ import java.io.File; import org.apache.commons.io.input.NullInputStream; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.api.Type; import org.apache.jackrabbit.oak.segment.SegmentNodeStore; import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/FileStoreParameterResolver.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/FileStoreParameterResolver.java index ab13df4b6dc..5899874834b 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/FileStoreParameterResolver.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/FileStoreParameterResolver.java @@ -116,12 +116,12 @@ public Object resolveParameter(ParameterContext parameterContext, ExtensionConte @SuppressWarnings("unchecked") private T getOrCreateFileStore(ExtensionContext ctx, Class type) { ExtensionContext.Store store = ctx.getStore(NAMESPACE); + CloseablePath segmentstoreDir = store.getOrComputeIfAbsent("tempdir-for-" + FileStore.class.getSimpleName(), + key -> new CloseablePath(computePathForTest(ctx)), + CloseablePath.class + ); return store.getOrComputeIfAbsent(type.getName(), k -> { try { - CloseablePath segmentstoreDir = store.getOrComputeIfAbsent("tempdir-for-" + FileStore.class.getSimpleName(), - key -> new 
CloseablePath(computePathForTest(ctx)), - CloseablePath.class - ); Files.createDirectories(segmentstoreDir.path); FileStoreBuilder fileStoreBuilder = FileStoreBuilder.fileStoreBuilder(segmentstoreDir.path.toFile()) .withStringCacheSize(0) diff --git a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/TemporaryBlobStore.java b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/TemporaryBlobStore.java index 7c47de8c50b..eb2a08e199d 100644 --- a/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/TemporaryBlobStore.java +++ b/oak-segment-tar/src/test/java/org/apache/jackrabbit/oak/segment/test/TemporaryBlobStore.java @@ -17,8 +17,8 @@ package org.apache.jackrabbit.oak.segment.test; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.FileDataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.junit.rules.ExternalResource; diff --git a/oak-store-composite/pom.xml b/oak-store-composite/pom.xml index cc21afea2d3..ed1832265e0 100644 --- a/oak-store-composite/pom.xml +++ b/oak-store-composite/pom.xml @@ -114,6 +114,11 @@ oak-api ${project.version} + + org.apache.jackrabbit + oak-blob + ${project.version} + org.apache.jackrabbit oak-core diff --git a/oak-store-composite/src/test/java/org/apache/jackrabbit/oak/composite/it/CompositeTestSupport.java b/oak-store-composite/src/test/java/org/apache/jackrabbit/oak/composite/it/CompositeTestSupport.java index b1e2ffa58a6..b9a48fe5d17 100644 --- a/oak-store-composite/src/test/java/org/apache/jackrabbit/oak/composite/it/CompositeTestSupport.java +++ b/oak-store-composite/src/test/java/org/apache/jackrabbit/oak/composite/it/CompositeTestSupport.java @@ -90,7 +90,6 @@ public static Option oak() { public static 
Option jackrabbit() { return composite( - mavenBundle().groupId(JACKRABBIT_GROUP_ID).artifactId("jackrabbit-data").versionAsInProject(), mavenBundle().groupId(JACKRABBIT_GROUP_ID).artifactId("jackrabbit-jcr-commons").versionAsInProject(), mavenBundle().groupId("javax.jcr").artifactId("jcr").versionAsInProject(), mavenBundle().groupId("commons-codec").artifactId("commons-codec").versionAsInProject(), diff --git a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java index f3938cedcb1..8e75c6228b5 100644 --- a/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java +++ b/oak-store-document/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java @@ -29,7 +29,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; import org.apache.jackrabbit.oak.api.Blob; import org.apache.jackrabbit.oak.commons.collections.ListUtils; import org.apache.jackrabbit.oak.commons.collections.SetUtils; diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/MigrationFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/MigrationFactory.java index aedef47a1b0..ee5ca22d461 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/MigrationFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/MigrationFactory.java @@ -17,6 +17,7 @@ package org.apache.jackrabbit.oak.upgrade.cli; import java.io.IOException; +import java.io.InputStream; import java.util.Collections; import java.util.List; import java.util.ServiceLoader; @@ -26,7 +27,7 @@ import org.apache.jackrabbit.core.RepositoryContext; import org.apache.jackrabbit.oak.commons.collections.ListUtils; import 
org.apache.jackrabbit.oak.commons.pio.Closer; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; +import org.apache.jackrabbit.oak.spi.blob.BlobOptions; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.commit.CommitHook; import org.apache.jackrabbit.oak.spi.state.NodeStore; @@ -56,11 +57,10 @@ public MigrationFactory(MigrationOptions options, StoreArguments stores, Datasto public RepositoryUpgrade createUpgrade() throws IOException, RepositoryException, CliArgumentException { RepositoryContext src = stores.getSrcStore().create(closer); - BlobStore srcBlobStore = new DataStoreBlobStore(src.getDataStore()); + BlobStore srcBlobStore = new ToJackrabbitDataStoreDelegatingBlobStore(src.getDataStore()); NodeStore dstStore = createTarget(closer, srcBlobStore); return createUpgrade(src, dstStore); } - public RepositorySidegrade createSidegrade() throws IOException, CliArgumentException { BlobStore srcBlobStore = datastores.getSrcBlobStore().create(closer); NodeStore srcStore = stores.getSrcStore().create(srcBlobStore, closer); @@ -125,4 +125,91 @@ private List loadCommitHooks() { return Collections.unmodifiableList(ListUtils.toList(loader.iterator())); } + /** + * Wraps An Oak BlobStore around a Jackrabbit Datastore + */ + private static class ToJackrabbitDataStoreDelegatingBlobStore implements BlobStore { + + private org.apache.jackrabbit.core.data.DataStore delegate; + + public ToJackrabbitDataStoreDelegatingBlobStore( + org.apache.jackrabbit.core.data.DataStore delegate) { + this.delegate = delegate; + } + + @Override + public String writeBlob(InputStream inputStream) throws IOException { + try { + org.apache.jackrabbit.core.data.DataRecord record = delegate.addRecord(inputStream); + return record.getIdentifier().toString(); + } catch (org.apache.jackrabbit.core.data.DataStoreException ex) { + throw new IOException("Failed to write blob", ex); + } + } + + @Override + public String writeBlob(InputStream 
inputStream, BlobOptions options) throws IOException { + try { + org.apache.jackrabbit.core.data.DataRecord record = delegate.addRecord(inputStream); + return record.getIdentifier().toString(); + } catch (org.apache.jackrabbit.core.data.DataStoreException ex) { + throw new IOException("Failed to write blob", ex); + } + } + + @Override + public int readBlob(String blobId, long pos, byte[] buff, int off, int length) + throws IOException { + + try (InputStream is = getInputStream(blobId)) { + + if (pos > 0) { + long skipped = is.skip(pos); + if (skipped < pos) { + return -1; + } + } + + return is.read(buff, off, length); + } + } + + @Override + public long getBlobLength(String blobId) throws IOException { + try { + org.apache.jackrabbit.core.data.DataRecord record = delegate.getRecord(new org.apache.jackrabbit.core.data.DataIdentifier(blobId)); + return record.getLength(); + } catch (org.apache.jackrabbit.core.data.DataStoreException ex) { + throw new IOException("Failed to get blob length", ex); + } + } + + @Override + public InputStream getInputStream(String blobId) throws IOException { + try { + org.apache.jackrabbit.core.data.DataRecord record = delegate.getRecord(new org.apache.jackrabbit.core.data.DataIdentifier(blobId)); + return record.getStream(); + } catch (org.apache.jackrabbit.core.data.DataStoreException ex) { + throw new IOException("Failed to get input stream", ex); + } + } + + @Override + public String getBlobId(String reference) { + // Usually same as blobId for Jackrabbit datastore + return reference; + } + + @Override + public String getReference(String blobId) { + // Jackrabbit DataStore doesn't distinguish strongly here + return blobId; + } + + @Override + public void close() throws Exception { + delegate.close(); + } + } + } diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/AzureDataStoreFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/AzureDataStoreFactory.java index 
52b52a65e46..af12f36b75e 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/AzureDataStoreFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/AzureDataStoreFactory.java @@ -35,12 +35,12 @@ import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.jackrabbit.core.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.pio.Closer; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider; import org.apache.jackrabbit.oak.stats.StatisticsProvider; diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/FileDataStoreFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/FileDataStoreFactory.java index 361759c6d72..3e947d7b7ff 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/FileDataStoreFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/FileDataStoreFactory.java @@ -19,11 +19,11 @@ import java.io.Closeable; import java.io.IOException; -import org.apache.jackrabbit.core.data.FileDataStore; import org.apache.jackrabbit.oak.commons.pio.Closer; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.blob.data.FileDataStore; public class FileDataStoreFactory implements BlobStoreFactory { diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/S3DataStoreFactory.java 
b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/S3DataStoreFactory.java index 951781f7161..6fd38045ba9 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/S3DataStoreFactory.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/S3DataStoreFactory.java @@ -34,12 +34,12 @@ import javax.jcr.RepositoryException; import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.DataStoreException; import org.apache.jackrabbit.oak.blob.cloud.s3.S3DataStore; import org.apache.jackrabbit.oak.commons.PropertiesUtil; import org.apache.jackrabbit.oak.commons.pio.Closer; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider; import org.apache.jackrabbit.oak.stats.StatisticsProvider; diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/SafeDataStoreBlobStore.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/SafeDataStoreBlobStore.java index 2fdd8403afb..22a3321b752 100644 --- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/SafeDataStoreBlobStore.java +++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/blob/SafeDataStoreBlobStore.java @@ -16,12 +16,12 @@ */ package org.apache.jackrabbit.oak.upgrade.cli.blob; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStore; -import org.apache.jackrabbit.core.data.DataStoreException; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.InMemoryDataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataIdentifier; +import 
org.apache.jackrabbit.oak.spi.blob.data.DataRecord; +import org.apache.jackrabbit.oak.spi.blob.data.DataStore; +import org.apache.jackrabbit.oak.spi.blob.data.DataStoreException; import org.apache.jackrabbit.oak.spi.blob.stats.StatsCollectingStreams; import org.jetbrains.annotations.NotNull; import org.slf4j.Logger;