BucketCrudHandler.java (new file)
@@ -0,0 +1,135 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.s3.endpoint;

import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;

import java.io.IOException;
import java.io.InputStream;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.audit.S3GAction;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpStatus;

/**
* Handler for default bucket CRUD operations.
* Implements PUT (create bucket) and DELETE operations when no
* subresource query parameters are present.
*
* This handler processes bucket-level requests that do not target
* specific subresources (such as {@code ?acl}, {@code ?uploads},
* or {@code ?delete}), which are handled by dedicated handlers.
*
* This handler extends EndpointBase to inherit all required functionality
* (configuration, headers, request context, audit logging, metrics, etc.).
*/
public class BucketCrudHandler extends EndpointBase implements BucketOperationHandler {

/**
* Handle only plain PUT bucket (create bucket), not subresources.
*/
private boolean shouldHandlePutCreateBucket() {
return queryParams().get(QueryParams.ACL) == null
&& queryParams().get(QueryParams.UPLOADS) == null
&& queryParams().get(QueryParams.DELETE) == null;
Comment on lines +52 to +54
Contributor:
Handlers are tried in order, so BucketCrudHandler does not need to check conditions; it can just handle all requests that reach it, like BucketEndpoint did.

We can keep this method if you prefer, but then please rename it to shouldHandle and reuse it in handleDeleteRequest.

}
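
For concreteness, a minimal sketch of what the suggested refactor might look like; shouldHandle is the name proposed in the comment above, and createBucketInternal / deleteBucketInternal are hypothetical helpers standing in for the create/delete logic shown elsewhere in this file, not methods that exist in the PR:

// Sketch only, assuming the rest of BucketCrudHandler stays as in this diff.
private boolean shouldHandle() {
  // Plain bucket PUT/DELETE, i.e. no subresource query parameters present.
  return queryParams().get(QueryParams.ACL) == null
      && queryParams().get(QueryParams.UPLOADS) == null
      && queryParams().get(QueryParams.DELETE) == null;
}

@Override
public Response handlePutRequest(String bucketName, InputStream body)
    throws IOException, OS3Exception {
  if (!shouldHandle()) {
    return null; // decline; the next handler or the NOT_IMPLEMENTED fallback takes over
  }
  return createBucketInternal(bucketName); // hypothetical helper: create-bucket logic as below
}

@Override
public Response handleDeleteRequest(String bucketName)
    throws IOException, OS3Exception {
  if (!shouldHandle()) {
    return null;
  }
  return deleteBucketInternal(bucketName); // hypothetical helper: delete-bucket logic as below
}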

/**
* Handle PUT /{bucket} for bucket creation.
*/
@Override
public Response handlePutRequest(String bucketName, InputStream body)
throws IOException, OS3Exception {

if (!shouldHandlePutCreateBucket()) {
return null;
}

long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.CREATE_BUCKET;

try {
String location = createS3Bucket(bucketName);
auditWriteSuccess(s3GAction);
getMetrics().updateCreateBucketSuccessStats(startNanos);
return Response.status(HttpStatus.SC_OK).header("Location", location)
.build();
} catch (OMException exception) {
auditWriteFailure(s3GAction, exception);
getMetrics().updateCreateBucketFailureStats(startNanos);
if (exception.getResult() == OMException.ResultCodes.INVALID_BUCKET_NAME) {
throw newError(S3ErrorTable.INVALID_BUCKET_NAME, bucketName, exception);
}
throw exception;
} catch (Exception ex) {
auditWriteFailure(s3GAction, ex);
throw ex;
}
}

/**
* Handle DELETE /{bucket} for bucket deletion.
*/
@Override
public Response handleDeleteRequest(String bucketName)
throws IOException, OS3Exception {

if (queryParams().get(QueryParams.ACL) != null
|| queryParams().get(QueryParams.UPLOADS) != null
|| queryParams().get(QueryParams.DELETE) != null) {
return null;
}

long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.DELETE_BUCKET;

try {
if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) {
OzoneBucket bucket = getBucket(bucketName);
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
}
deleteS3Bucket(bucketName);
} catch (OMException ex) {
auditWriteFailure(s3GAction, ex);
getMetrics().updateDeleteBucketFailureStats(startNanos);
if (ex.getResult() == OMException.ResultCodes.BUCKET_NOT_EMPTY) {
throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex);
} else if (ex.getResult() == OMException.ResultCodes.BUCKET_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
} else if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
} else {
throw ex;
}
} catch (Exception ex) {
auditWriteFailure(s3GAction, ex);
throw ex;
}

auditWriteSuccess(s3GAction);
getMetrics().updateDeleteBucketSuccessStats(startNanos);
return Response
.status(HttpStatus.SC_NO_CONTENT)
.build();
}
}
BucketEndpoint.java
@@ -67,7 +67,6 @@
import org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams;
import org.apache.hadoop.ozone.s3.util.S3StorageType;
import org.apache.hadoop.util.Time;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -308,35 +307,7 @@ public Response put(
}
}

// No handler handled the request, execute default operation: create bucket
return handleCreateBucket(bucketName);
}

/**
* Default PUT bucket operation (create bucket).
*/
private Response handleCreateBucket(String bucketName)
throws IOException, OS3Exception {
long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.CREATE_BUCKET;

try {
String location = createS3Bucket(bucketName);
auditWriteSuccess(s3GAction);
getMetrics().updateCreateBucketSuccessStats(startNanos);
return Response.status(HttpStatus.SC_OK).header("Location", location)
.build();
} catch (OMException exception) {
auditWriteFailure(s3GAction, exception);
getMetrics().updateCreateBucketFailureStats(startNanos);
if (exception.getResult() == ResultCodes.INVALID_BUCKET_NAME) {
throw newError(S3ErrorTable.INVALID_BUCKET_NAME, bucketName, exception);
}
throw exception;
} catch (Exception ex) {
auditWriteFailure(s3GAction, ex);
throw ex;
}
throw newError(S3ErrorTable.NOT_IMPLEMENTED, "PUT bucket");
}

public Response listMultipartUploads(
@@ -429,38 +400,14 @@ public Response head(@PathParam(BUCKET) String bucketName)
@DELETE
public Response delete(@PathParam(BUCKET) String bucketName)
throws IOException, OS3Exception {
long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.DELETE_BUCKET;

try {
if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) {
OzoneBucket bucket = getBucket(bucketName);
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
}
deleteS3Bucket(bucketName);
} catch (OMException ex) {
auditWriteFailure(s3GAction, ex);
getMetrics().updateDeleteBucketFailureStats(startNanos);
if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) {
throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex);
} else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
} else if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
} else {
throw ex;
for (BucketOperationHandler handler : handlers) {
Response response = handler.handleDeleteRequest(bucketName);
if (response != null) {
return response;
}
} catch (Exception ex) {
auditWriteFailure(s3GAction, ex);
throw ex;
}

auditWriteSuccess(s3GAction);
getMetrics().updateDeleteBucketSuccessStats(startNanos);
return Response
.status(HttpStatus.SC_NO_CONTENT)
.build();

throw newError(S3ErrorTable.NOT_IMPLEMENTED, "DELETE bucket");
}

/**
@@ -557,6 +504,7 @@ protected void init() {
// initialize handlers
handlers = new ArrayList<>();
addHandler(new BucketAclHandler());
addHandler(new BucketCrudHandler());
}

private <T extends EndpointBase & BucketOperationHandler> void addHandler(T handler) {
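
Putting the hunks above together, a condensed sketch of the dispatch pattern the endpoint now relies on; JAX-RS annotations, the addHandler indirection, and the audit/metrics plumbing are omitted, so this illustrates the shape rather than the actual BucketEndpoint source:

// Illustration only: chain-of-responsibility dispatch for DELETE /{bucket}.
// Registration order matters: subresource handlers (e.g. BucketAclHandler)
// are registered before the catch-all BucketCrudHandler.
private List<BucketOperationHandler> handlers;

protected void init() {
  handlers = new ArrayList<>();
  handlers.add(new BucketAclHandler());
  handlers.add(new BucketCrudHandler());
}

public Response delete(String bucketName) throws IOException, OS3Exception {
  for (BucketOperationHandler handler : handlers) {
    Response response = handler.handleDeleteRequest(bucketName);
    if (response != null) {
      return response; // first handler that claims the request wins
    }
  }
  // No handler claimed the request.
  throw newError(S3ErrorTable.NOT_IMPLEMENTED, "DELETE bucket");
}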
BucketOperationHandler.java
@@ -62,4 +62,9 @@ default Response handleGetRequest(String bucketName)
throws IOException, OS3Exception {
return null;
}

default Response handleDeleteRequest(String bucketName)
throws IOException, OS3Exception {
return null;
}
}
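
To show how the return-null-to-decline contract added here is meant to be used, a hedged sketch of a hypothetical extra handler; the class name, the "tagging" subresource, and the assumption that queryParams() is keyed by the raw parameter name are illustrative only, and imports plus any remaining EndpointBase members are omitted:

// Hypothetical handler, for illustration only. It claims DELETE /{bucket}?tagging
// and declines everything else by returning null so later handlers can try.
public class BucketTaggingHandler extends EndpointBase implements BucketOperationHandler {

  @Override
  public Response handleDeleteRequest(String bucketName)
      throws IOException, OS3Exception {
    if (queryParams().get("tagging") == null) { // assumed: keyed by raw query parameter name
      return null; // not our subresource; pass to the next handler
    }
    // ... remove the bucket tagging here ...
    return Response.status(HttpStatus.SC_NO_CONTENT).build();
  }
}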