Skip to content

Commit

Permalink
Java V2: Create 27 Java V2 code examples for S3 directory bucket actions, S3 (#7109)
Browse files Browse the repository at this point in the history

* 1. Add 26 Java V2 code examples for S3 directory bucket actions. Add 1 Java V2 code example for S3 directory bucket getting-started hello. 2. Update metadata.yaml 3. Update config.py for WRITEME

---------

Co-authored-by: David Souther <[email protected]>
  • Loading branch information
yanjieniu and DavidSouther authored Nov 27, 2024
1 parent 7f17f6e commit a28443e
Show file tree
Hide file tree
Showing 37 changed files with 5,772 additions and 7 deletions.
416 changes: 414 additions & 2 deletions .doc_gen/metadata/s3-directory-buckets_metadata.yaml

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions .tools/readmes/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
"sdk_api_ref": 'https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/{{service["name"]}}/package-summary.html',
"service_folder_overrides": {
"s3-control": "javav2/example_code/s3/src/main/java/com/example/s3/batch",
"s3-directory-buckets": "javav2/example_code/s3/src/main/java/com/example/s3/directorybucket",
"medical-imaging": "javav2/example_code/medicalimaging",
},
},
Expand Down
8 changes: 5 additions & 3 deletions javav2/example_code/s3/checkstyle.xml
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
<?xml version="1.0" ?>

<!DOCTYPE module PUBLIC
"-//Checkstyle//DTD Checkstyle Configuration 1.2//EN"
"https://checkstyle.org/dtds/configuration_1_2.dtd">
"-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
"https://checkstyle.org/dtds/configuration_1_3.dtd">

<module name="Checker">
<module name="TreeWalker">

<module name="IllegalCatch"/>
<module name="IllegalCatch">
<property name="illegalClassNames" value="Error,Exception,Throwable,java.lang.Error,java.lang.Exception,java.lang.Throwable"/>
</module>
<module name="EmptyStatement"/>
<module name="AvoidStarImport"/>
<module name="UnusedImports"/>
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package com.example.s3.directorybucket;

// snippet-start:[s3directorybuckets.java2.abortmultipartupload.import]
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

import static com.example.s3.util.S3DirectoryBucketUtils.createDirectoryBucket;
import static com.example.s3.util.S3DirectoryBucketUtils.createDirectoryBucketMultipartUpload;
import static com.example.s3.util.S3DirectoryBucketUtils.createS3Client;
import static com.example.s3.util.S3DirectoryBucketUtils.deleteDirectoryBucket;
// snippet-end:[s3directorybuckets.java2.abortmultipartupload.import]

/**
* Before running this example:
* <p>
* The SDK must be able to authenticate AWS requests on your behalf. If you have
* not configured
* authentication for SDKs and tools, see
* https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs
* and Tools Reference Guide.
* <p>
* You must have a runtime environment configured with the Java SDK.
* See
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in
* the Developer Guide if this is not set up.
* <p>
* To use S3 directory buckets, configure a gateway VPC endpoint. This is the
* recommended method to enable directory bucket traffic without
* requiring an internet gateway or NAT device. For more information on
* configuring VPC gateway endpoints, visit
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway.
* <p>
* Directory buckets are available in specific AWS Regions and Zones. For
* details on Regions and Zones supporting directory buckets, see
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
*/

public class AbortDirectoryBucketMultipartUploads {
    private static final Logger logger = LoggerFactory.getLogger(AbortDirectoryBucketMultipartUploads.class);

    // snippet-start:[s3directorybuckets.java2.abortmultipartupload.main]

    /**
     * Aborts an in-progress multipart upload in the given S3 directory bucket,
     * discarding any parts that were already uploaded.
     *
     * @param s3Client   the S3 client used to send the abort request
     * @param bucketName the directory bucket that owns the upload
     * @param objectKey  the key of the object the upload was targeting
     * @param uploadId   the identifier of the multipart upload to abort
     * @return {@code true} when the abort call succeeds, {@code false} if S3
     *         rejects the request
     */
    public static boolean abortDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName,
            String objectKey, String uploadId) {
        logger.info("Aborting multipart upload: {} for bucket: {}", uploadId, bucketName);
        try {
            // Identify the upload by bucket, key, and upload ID, then abort it.
            AbortMultipartUploadRequest request = AbortMultipartUploadRequest.builder()
                    .bucket(bucketName)
                    .key(objectKey)
                    .uploadId(uploadId)
                    .build();
            s3Client.abortMultipartUpload(request);
            logger.info("Aborted multipart upload: {} for object: {}", uploadId, objectKey);
            return true;
        } catch (S3Exception e) {
            // Log the service error (with stack trace) and report failure to the caller.
            logger.error("Failed to abort multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                    e.awsErrorDetails().errorCode(), e);
            return false;
        }
    }
    // snippet-end:[s3directorybuckets.java2.abortmultipartupload.main]

    /**
     * Demonstrates the abort operation end to end: creates a directory bucket,
     * starts a multipart upload, aborts it, and tears the bucket down.
     */
    public static void main(String[] args) {
        Region region = Region.US_WEST_2;
        S3Client s3Client = createS3Client(region);
        String zone = "usw2-az1";
        // Directory bucket names must end with the "--<zone>--x-s3" suffix.
        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
        String objectKey = "largeObject"; // your-object-key

        try {
            // Set up: bucket plus an open multipart upload to abort.
            createDirectoryBucket(s3Client, bucketName, zone);
            String uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);

            // Exercise the abort and report the outcome.
            if (abortDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId)) {
                logger.info("Multipart upload successfully aborted for bucket: {}", bucketName);
            } else {
                logger.error("Failed to abort multipart upload for bucket: {}", bucketName);
            }
        } catch (S3Exception e) {
            logger.error("An error occurred during S3 operations: {} - Error code: {}",
                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode(), e);
        } finally {
            // Tear down by deleting the bucket; never let cleanup failures escape main.
            try {
                deleteDirectoryBucket(s3Client, bucketName);
            } catch (S3Exception e) {
                logger.error("Failed to delete the bucket due to S3 error: {} - Error code: {}",
                        e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode(), e);
            } catch (RuntimeException e) {
                logger.error("Failed to delete the bucket due to unexpected error: {}", e.getMessage(), e);
            } finally {
                s3Client.close();
            }
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.example.s3.directorybucket;

// snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import]

import com.example.s3.util.S3DirectoryBucketUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
import software.amazon.awssdk.services.s3.model.CompletedPart;
import software.amazon.awssdk.services.s3.model.S3Exception;

import java.io.IOException;
import java.nio.file.Path;
import java.util.List;

import static com.example.s3.util.S3DirectoryBucketUtils.createDirectoryBucket;
import static com.example.s3.util.S3DirectoryBucketUtils.createDirectoryBucketMultipartUpload;
import static com.example.s3.util.S3DirectoryBucketUtils.createS3Client;
import static com.example.s3.util.S3DirectoryBucketUtils.deleteAllObjectsInDirectoryBucket;
import static com.example.s3.util.S3DirectoryBucketUtils.deleteDirectoryBucket;
import static com.example.s3.util.S3DirectoryBucketUtils.getFilePath;
import static com.example.s3.util.S3DirectoryBucketUtils.multipartUploadForDirectoryBucket;
// snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.import]

/**
* Before running this example:
* <p>
* The SDK must be able to authenticate AWS requests on your behalf. If you have
* not configured
* authentication for SDKs and tools, see
* https://docs.aws.amazon.com/sdkref/latest/guide/access.html in the AWS SDKs
* and Tools Reference Guide.
* <p>
* You must have a runtime environment configured with the Java SDK.
* See
* https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html in
* the Developer Guide if this is not set up.
* <p>
* To use S3 directory buckets, configure a gateway VPC endpoint. This is the
* recommended method to enable directory bucket traffic without
* requiring an internet gateway or NAT device. For more information on
* configuring VPC gateway endpoints, visit
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-networking-vpc-gateway.
* <p>
* Directory buckets are available in specific AWS Regions and Zones. For
* details on Regions and Zones supporting directory buckets, see
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-networking.html#s3-express-endpoints.
*/

public class CompleteDirectoryBucketMultipartUpload {
    private static final Logger logger = LoggerFactory.getLogger(CompleteDirectoryBucketMultipartUpload.class);

    // snippet-start:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main]

    /**
     * This method completes the multipart upload request by collating all the
     * upload parts.
     *
     * @param s3Client    The S3 client used to interact with S3
     * @param bucketName  The name of the directory bucket
     * @param objectKey   The key (name) of the object to be uploaded
     * @param uploadId    The upload ID used to track the multipart upload
     * @param uploadParts The list of completed parts
     * @return True if the multipart upload is successfully completed, false
     *         otherwise
     */
    public static boolean completeDirectoryBucketMultipartUpload(S3Client s3Client, String bucketName, String objectKey,
            String uploadId, List<CompletedPart> uploadParts) {
        try {
            // Collate the uploaded parts into the completion request.
            CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder()
                    .parts(uploadParts)
                    .build();
            CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder()
                    .bucket(bucketName)
                    .key(objectKey)
                    .uploadId(uploadId)
                    .multipartUpload(completedMultipartUpload)
                    .build();

            CompleteMultipartUploadResponse response = s3Client.completeMultipartUpload(completeMultipartUploadRequest);
            logger.info("Multipart upload completed. ETag: {}", response.eTag());
            return true;
        } catch (S3Exception e) {
            logger.error("Failed to complete multipart upload: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                    e.awsErrorDetails().errorCode(), e);
            return false;
        }
    }
    // snippet-end:[s3directorybuckets.java2.completedirectorybucketmultipartupload.main]

    /**
     * Demonstrates the complete-multipart-upload flow end to end: creates a
     * directory bucket, starts a multipart upload, uploads parts from a sample
     * file, completes the upload, and tears everything down.
     */
    public static void main(String[] args) {
        Region region = Region.US_WEST_2;
        S3Client s3Client = createS3Client(region);
        String zone = "usw2-az1";
        // Directory bucket names must end with the "--<zone>--x-s3" suffix.
        String bucketName = "test-bucket-" + System.currentTimeMillis() + "--" + zone + "--x-s3";
        String uploadId;
        String objectKey = "largeObject";
        Path filePath = getFilePath("directoryBucket/sample-large-object.jpg");

        try {
            // Create the directory bucket
            createDirectoryBucket(s3Client, bucketName, zone);
            // Create a multipart upload
            uploadId = createDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey);
            // Perform multipart upload for the directory bucket
            List<CompletedPart> uploadedParts = multipartUploadForDirectoryBucket(s3Client, bucketName, objectKey,
                    uploadId, filePath);
            logger.info("Uploaded parts: {}", uploadedParts);
            // Complete Multipart Uploads
            boolean completed = completeDirectoryBucketMultipartUpload(s3Client, bucketName, objectKey, uploadId,
                    uploadedParts);
            if (completed) {
                logger.info("Multipart upload successfully completed for bucket: {}", bucketName);
            } else {
                logger.error("Failed to complete multipart upload for bucket: {}", bucketName);
            }
        } catch (S3Exception e) {
            // Pass the exception as the last argument so SLF4J logs the stack trace.
            logger.error("An error occurred during S3 operations: {} - Error code: {}",
                    e.awsErrorDetails().errorMessage(), e.awsErrorDetails().errorCode(), e);
        } catch (IOException e) {
            logger.error("An I/O error occurred: {}", e.getMessage(), e);
        } finally {
            // Clean up: abort any leftover uploads, empty the bucket, then delete it.
            try {
                logger.info("Starting cleanup for bucket: {}", bucketName);
                S3DirectoryBucketUtils.abortDirectoryBucketMultipartUploads(s3Client, bucketName);
                deleteAllObjectsInDirectoryBucket(s3Client, bucketName);
                deleteDirectoryBucket(s3Client, bucketName);
                logger.info("Cleanup completed for bucket: {}", bucketName);
            } catch (S3Exception e) {
                logger.error("Error during cleanup: {} - Error code: {}", e.awsErrorDetails().errorMessage(),
                        e.awsErrorDetails().errorCode(), e);
            } catch (RuntimeException e) {
                logger.error("Unexpected error during cleanup: {}", e.getMessage(), e);
            } finally {
                // Close the S3 client
                s3Client.close();
            }
        }
    }
}
Loading

0 comments on commit a28443e

Please sign in to comment.