diff --git a/.doc_gen/cross-content/cross_Neptune_Java_block.xml b/.doc_gen/cross-content/cross_Neptune_Java_block.xml new file mode 100644 index 00000000000..21d8cd8e8c2 --- /dev/null +++ b/.doc_gen/cross-content/cross_Neptune_Java_block.xml @@ -0,0 +1,14 @@ + + + %phrases-shared; + ]> + + + Shows how to use &neptunelong; Java API to create a Lambda function that queries graph data within the VPC. + + + For complete source code and instructions on how to set up and run, see the full example on + GitHub. + + \ No newline at end of file diff --git a/.doc_gen/metadata/cross_metadata.yaml b/.doc_gen/metadata/cross_metadata.yaml index 203bd55ce40..5130d597490 100644 --- a/.doc_gen/metadata/cross_metadata.yaml +++ b/.doc_gen/metadata/cross_metadata.yaml @@ -1,4 +1,18 @@ # zexi 0.4.0 +cross_Neptune_Query: + title: Use the &neptunelong; API to develop a &LAM; function that queries graph data + title_abbrev: Use the &neptune; API to query graph data + synopsis: use the &neptune; API to query graph data. + category: Scenarios + languages: + Java: + versions: + - sdk_version: 2 + block_content: cross_Neptune_Java_block.xml + service_main: neptune + services: + neptune: + lambda: cross_MessageProcessingFrameworkTutorial: title: Use the &AWS; Message Processing Framework for .NET to publish and receive &SQS; messages title_abbrev: Use the &AWS; Message Processing Framework for .NET with &SQS; diff --git a/.doc_gen/metadata/neptune_metadata.yaml b/.doc_gen/metadata/neptune_metadata.yaml new file mode 100644 index 00000000000..6a616a3e6cc --- /dev/null +++ b/.doc_gen/metadata/neptune_metadata.yaml @@ -0,0 +1,239 @@ +# zexi 0.4.0 +neptune_Hello: + title: Hello &neptunelong; + title_abbrev: Hello &neptune; + synopsis: get started using &neptune;. + category: Hello + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.hello.main + services: + neptune: {DescribeDBClustersPaginator} +neptune_ExecuteQuery: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.graph.execute.main + services: + neptune: {ExecuteQuery} +neptune_CreateGraph: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.graph.create.main + services: + neptune: {CreateGraph} +neptune_ExecuteOpenCypherExplainQuery: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.data.query.opencypher.main + services: + neptune: {ExecuteOpenCypherExplainQuery} +neptune_ExecuteGremlinProfileQuery: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.data.query.gremlin.main + services: + neptune: {ExecuteGremlinProfileQuery} +neptune_ExecuteGremlinQuery: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.data.query.gremlin.profile.main + services: + neptune: {ExecuteGremlinQuery} +neptune_DeleteDBSubnetGroup: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.delete.subnet.group.main + 
services: + neptune: {DeleteDBSubnetGroup} +neptune_DeleteDBCluster: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.delete.cluster.main + services: + neptune: {DeleteDBCluster} +neptune_DeleteDBInstance: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.delete.instance.main + services: + neptune: {DeleteDBInstance} +neptune_StartDBCluster: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.start.cluster.main + services: + neptune: {StartDBCluster} +neptune_StopDBCluster: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.stop.cluster.main + services: + neptune: {StopDBCluster} +neptune_DescribeDBClusters: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.describe.cluster.main + services: + neptune: {DescribeDBClusters} +neptune_DescribeDBInstances: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.describe.dbinstance.main + services: + neptune: {DescribeDBInstances} +neptune_CreateDBInstance: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.create.dbinstance.main + services: + neptune: {CreateDBInstance} +neptune_CreateDBCluster: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.create.cluster.main + services: + neptune: {CreateDBCluster} +neptune_CreateDBSubnetGroup: + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: + snippet_tags: + - neptune.java2.create.subnet.main + services: + neptune: {CreateDBSubnetGroup} +neptune_Scenario: + synopsis_list: + - Create an &neptunelong; Subnet Group. + - Create an &neptune; Cluster. + - Create an &neptune; Instance. + - Check the status of the &neptune; Instance. + - Show &neptune; cluster details. + - Stop the &neptune; cluster. + - Start the &neptune; cluster. + - Delete the &neptune; Assets. + category: Basics + languages: + Java: + versions: + - sdk_version: 2 + github: javav2/example_code/neptune + sdkguide: + excerpts: + - description: Run an interactive scenario demonstrating &neptune; features. + snippet_tags: + - neptune.java2.scenario.main + - description: A wrapper class for &neptune; SDK methods. 
+ snippet_tags: + - neptune.java2.actions.main + services: + neptune: {} diff --git a/javav2/example_code/neptune/.gitignore b/javav2/example_code/neptune/.gitignore new file mode 100644 index 00000000000..5ff6309b719 --- /dev/null +++ b/javav2/example_code/neptune/.gitignore @@ -0,0 +1,38 @@ +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store \ No newline at end of file diff --git a/javav2/example_code/neptune/README.md b/javav2/example_code/neptune/README.md new file mode 100644 index 00000000000..c113b49fc31 --- /dev/null +++ b/javav2/example_code/neptune/README.md @@ -0,0 +1,123 @@ +# Neptune code examples for the SDK for Java 2.x + +## Overview + +Shows how to use the AWS SDK for Java 2.x to work with Amazon Neptune. + + + + +_Neptune is a serverless graph database designed for superior scalability and availability._ + +## ⚠ Important + +* Running this code might result in charges to your AWS account. For more details, see [AWS Pricing](https://aws.amazon.com/pricing/) and [Free Tier](https://aws.amazon.com/free/). +* Running the tests might result in charges to your AWS account. +* We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). +* This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). + + + + +## Code examples + +### Prerequisites + +For prerequisites, see the [README](../../README.md#Prerequisites) in the `javav2` folder. + + + + + +### Get started + +- [Hello Neptune](src/main/java/com/example/neptune/HelloNeptune.java#L14) (`DescribeDBClustersPaginator`) + + +### Basics + +Code examples that show you how to perform the essential operations within a service. + +- [Learn the basics](src/main/java/com/example/neptune/scenerio/NeptuneScenario.java) + + +### Single actions + +Code excerpts that show you how to call individual service functions. 
+ +- [CreateDBCluster](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L443) +- [CreateDBInstance](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L404) +- [CreateDBSubnetGroup](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L478) +- [DeleteDBCluster](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L143) +- [DeleteDBInstance](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L168) +- [DeleteDBSubnetGroup](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L126) +- [DescribeDBClusters](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L294) +- [DescribeDBInstances](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L345) +- [StartDBCluster](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L262) +- [StopDBCluster](src/main/java/com/example/neptune/scenerio/NeptuneActions.java#L278) + + + + + +## Run the examples + +### Instructions + + + + + +#### Hello Neptune + +This example shows you how to get started using Neptune. + + +#### Learn the basics + +This example shows you how to do the following: + +- Create an Amazon Neptune Subnet Group. +- Create an Neptune Cluster. +- Create an Neptune Instance. +- Check the status of the Neptune Instance. +- Show Neptune cluster details. +- Stop the Neptune cluster. +- Start the Neptune cluster. +- Delete the Neptune Assets. + + + + + + + + + +### Tests + +⚠ Running tests might result in charges to your AWS account. + + +To find instructions for running these tests, see the [README](../../README.md#Tests) +in the `javav2` folder. + + + + + + +## Additional resources + +- [Neptune User Guide](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) +- [Neptune API Reference](https://docs.aws.amazon.com/neptune/latest/apiref/Welcome.html) +- [SDK for Java 2.x Neptune reference](https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/services/neptune/package-summary.html) + + + + +--- + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 diff --git a/javav2/example_code/neptune/pom.xml b/javav2/example_code/neptune/pom.xml new file mode 100644 index 00000000000..6ae67fba0de --- /dev/null +++ b/javav2/example_code/neptune/pom.xml @@ -0,0 +1,144 @@ + + + 4.0.0 + + org.example + neptune + 1.0-SNAPSHOT + + + UTF-8 + 21 + 21 + 21 + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.5.2 + + + org.apache.maven.plugins + maven-compiler-plugin + 3.11.0 + + ${java.version} + 21 + 21 + --enable-preview + + + + org.apache.maven.plugins + maven-resources-plugin + 3.3.1 + + + + + + + software.amazon.awssdk + bom + 2.31.8 + pom + import + + + org.apache.logging.log4j + log4j-bom + 2.23.1 + pom + import + + + + + + org.junit.jupiter + junit-jupiter + 5.11.4 + test + + + software.amazon.awssdk + neptune + + + software.amazon.awssdk + netty-nio-client + + + software.amazon.awssdk + ec2 + + + software.amazon.awssdk + neptunedata + + + software.amazon.awssdk + neptunegraph + + + software.amazon.awssdk + apache-client + 2.25.38 + + + + software.amazon.awssdk + secretsmanager + + + com.google.code.gson + gson + 2.10.1 + + + software.amazon.awssdk + sns + + + software.amazon.awssdk + sqs + + + software.amazon.awssdk + ssooidc + + + software.amazon.awssdk + sso + + + software.amazon.awssdk + iam-policy-builder + + + software.amazon.awssdk + sts + + + org.apache.logging.log4j + log4j-core + + + org.slf4j + slf4j-api + 2.0.13 + + + org.apache.logging.log4j + log4j-slf4j2-impl + + + org.apache.logging.log4j + log4j-1.2-api + + + diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/HelloNeptune.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/HelloNeptune.java new file mode 100644 index 00000000000..f7cf30b465f --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/HelloNeptune.java @@ -0,0 +1,78 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.services.neptune.NeptuneAsyncClient; +import software.amazon.awssdk.services.neptune.model.DescribeDbClustersRequest; +import software.amazon.awssdk.services.neptune.model.DescribeDbClustersResponse; +import java.util.concurrent.CompletableFuture; + +// snippet-start:[neptune.java2.hello.main] +/** + * Before running this Java V2 code example, set up your development + * environment, including your credentials. + * + * For more information, see the following documentation topic: + * + * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html + */ +public class HelloNeptune { + public static void main(String[] args) { + NeptuneAsyncClient neptuneClient = NeptuneAsyncClient.create(); + describeDbCluster(neptuneClient).join(); // This ensures the async code runs to completion + } + + /** + * Describes the Amazon Neptune DB clusters. 
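+     * The example subscribes to the asynchronous paginator with a Reactive Streams
+     * {@link Subscriber}, requests all pages, and completes the returned future once the
+     * final page has been processed.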
+ * + * @param neptuneClient the Neptune asynchronous client used to make the request + * @return a {@link CompletableFuture} that completes when the operation is finished + */ + public static CompletableFuture describeDbCluster(NeptuneAsyncClient neptuneClient) { + DescribeDbClustersRequest request = DescribeDbClustersRequest.builder() + .maxRecords(20) + .build(); + + SdkPublisher paginator = neptuneClient.describeDBClustersPaginator(request); + CompletableFuture future = new CompletableFuture<>(); + + paginator.subscribe(new Subscriber() { + private Subscription subscription; + + @Override + public void onSubscribe(Subscription s) { + this.subscription = s; + s.request(Long.MAX_VALUE); // request all items + } + + @Override + public void onNext(DescribeDbClustersResponse response) { + response.dbClusters().forEach(cluster -> { + System.out.println("Cluster Identifier: " + cluster.dbClusterIdentifier()); + System.out.println("Status: " + cluster.status()); + }); + } + + @Override + public void onError(Throwable t) { + future.completeExceptionally(t); + } + + @Override + public void onComplete() { + future.complete(null); + } + }); + + return future.whenComplete((result, throwable) -> { + neptuneClient.close(); + if (throwable != null) { + System.err.println("Error describing DB clusters: " + throwable.getMessage()); + } + }); + } +}// snippet-end:[neptune.java2.hello.main] \ No newline at end of file diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/CreateNeptuneGraphExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/CreateNeptuneGraphExample.java new file mode 100644 index 00000000000..db3d453322b --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/CreateNeptuneGraphExample.java @@ -0,0 +1,82 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.analytics; + +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunegraph.NeptuneGraphClient; +import software.amazon.awssdk.services.neptunegraph.model.CreateGraphRequest; +import software.amazon.awssdk.services.neptunegraph.model.CreateGraphResponse; +import software.amazon.awssdk.services.neptunegraph.model.NeptuneGraphException; + +/** + * This Java example demonstrates how to query Amazon Neptune Analytics (Neptune Graph) using the AWS SDK for Java V2. + * + * VPC NETWORKING REQUIREMENT: + * ---------------------------------------------------------------------- + * Amazon Neptune Analytics must be accessed from within an Amazon VPC. This means: + * + * 1. Your application must run within a VPC environment such as EC2, Lambda, ECS, Cloud9, or an AWS managed notebook. + * 2. You **cannot run this code from your local machine** unless you are connected via a VPN or Direct Connect. + * 3. Ensure that your Neptune Graph cluster endpoint is accessible and security groups allow inbound access from your client. + * 4. Always use the HTTPS endpoint when setting the `endpointOverride()` value. 
+ * + * You can test access by running: + * curl https://:8182/status + * ---------------------------------------------------------------------- + */ + +public class CreateNeptuneGraphExample { + + public static void main(String[] args) { + Region region = Region.US_EAST_1; + String graphName = "sample-analytics-graph"; + + // Create the NeptuneGraph client + NeptuneGraphClient client = NeptuneGraphClient.builder() + .region(region) + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + + executeCreateGraph(client, graphName); + } + + // snippet-start:[neptune.java2.graph.create.main] + /** + * Executes the process of creating a new Neptune graph. + * + * @param client the Neptune graph client used to interact with the Neptune service + * @param graphName the name of the graph to be created + * @throws NeptuneGraphException if an error occurs while creating the graph + */ + public static void executeCreateGraph(NeptuneGraphClient client, String graphName) { + try { + // Create the graph request + CreateGraphRequest request = CreateGraphRequest.builder() + .graphName(graphName) + .provisionedMemory(16) + .build(); + + // Create the graph + CreateGraphResponse response = client.createGraph(request); + + // Extract the graph name and ARN + String createdGraphName = response.name(); + String graphArn = response.arn(); + String graphEndpoint = response.endpoint(); + + System.out.println("Graph created successfully!"); + System.out.println("Graph Name: " + createdGraphName); + System.out.println("Graph ARN: " + graphArn); + System.out.println("Graph Endpoint: " +graphEndpoint ); + + } catch (NeptuneGraphException e) { + System.err.println("Failed to create graph: " + e.awsErrorDetails().errorMessage()); + } finally { + client.close(); + } + } + // snippet-end:[neptune.java2.graph.create.main] +} + diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/NeptuneAnalyticsQueryExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/NeptuneAnalyticsQueryExample.java new file mode 100644 index 00000000000..c9a695e86bb --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/analytics/NeptuneAnalyticsQueryExample.java @@ -0,0 +1,103 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.analytics; + +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunegraph.NeptuneGraphClient; +import software.amazon.awssdk.services.neptunegraph.model.ExecuteQueryRequest; +import software.amazon.awssdk.services.neptunegraph.model.ExecuteQueryResponse; +import software.amazon.awssdk.services.neptunegraph.model.NeptuneGraphException; +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.stream.Collectors; + +/** + * This Java example demonstrates how to query Amazon Neptune Analytics (Neptune Graph) using the AWS SDK for Java V2. + * + * VPC NETWORKING REQUIREMENT: + * ---------------------------------------------------------------------- + * Amazon Neptune Analytics must be accessed from within an Amazon VPC. This means: + * + * 1. 
Your application must run within a VPC environment such as EC2, Lambda, ECS, Cloud9, or an AWS managed notebook. + * 2. You **cannot run this code from your local machine** unless you are connected via a VPN or Direct Connect. + * 3. Ensure that your Neptune Graph cluster endpoint is accessible and security groups allow inbound access from your client. + * 4. Always use the HTTPS endpoint when setting the `endpointOverride()` value. + * + * You can test access by running: + * curl https://:8182/status + * ---------------------------------------------------------------------- + */ + +public class NeptuneAnalyticsQueryExample { + + public static void main(String[] args) { + + // Replace with your Neptune Analytics graph endpoint (including port 8182) + // You can get the Endpoint value by running CreateNeptuneGraphExample + String neptuneAnalyticsEndpoint = "https://:8182"; + String graphId = ""; + + NeptuneGraphClient client = NeptuneGraphClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(neptuneAnalyticsEndpoint)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(0)) // No socket timeout (read_timeout=None) + ) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(0)) // No total timeout + .retryPolicy(b -> b.numRetries(0)) // Disable retries (total_max_attempts=1) + .build()) + .build(); + + executeGremlinProfileQuery(client, graphId); + } + + // snippet-start:[neptune.java2.graph.execute.main] + /** + * Executes a Gremlin profile query on the Neptune Analytics graph. + * + * @param client the {@link NeptuneGraphClient} instance to use for the query + * @param graphId the identifier of the graph to execute the query on + * + * @throws NeptuneGraphException if an error occurs while executing the query on the Neptune Graph + * @throws Exception if an unexpected error occurs + */ + public static void executeGremlinProfileQuery(NeptuneGraphClient client, String graphId) { + + try { + System.out.println("Running openCypher query on Neptune Analytics..."); + + ExecuteQueryRequest request = ExecuteQueryRequest.builder() + .graphIdentifier(graphId) + .queryString("MATCH (n {code: 'ANC'}) RETURN n") + .language("OPEN_CYPHER") + .build(); + + ResponseInputStream response = client.executeQuery(request); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response, StandardCharsets.UTF_8))) { + String result = reader.lines().collect(Collectors.joining("\n")); + System.out.println("Query Result:"); + System.out.println(result); + } catch (Exception e) { + System.err.println("Error reading response: " + e.getMessage()); + } + + } catch (NeptuneGraphException e) { + System.err.println("NeptuneGraph error: " + e.awsErrorDetails().errorMessage()); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + } finally { + client.close(); + } + } + // snippet-end:[neptune.java2.graph.execute.main] +} + diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/database/GremlinProfileQueryExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/GremlinProfileQueryExample.java new file mode 100644 index 00000000000..1f85706a97e --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/GremlinProfileQueryExample.java @@ -0,0 +1,86 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.database; + +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinProfileQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinProfileQueryResponse; +import software.amazon.awssdk.services.neptunedata.model.NeptunedataException; +import java.net.URI; +import java.time.Duration; + +/** + * Example: Running a Gremlin Profile query using the AWS SDK for Java V2. + * + * ---------------------------------------------------------------------------------- + * VPC Networking Requirement: + * ---------------------------------------------------------------------------------- + * Amazon Neptune must be accessed from **within the same VPC** as the Neptune cluster. + * It does not expose a public endpoint, so this code must be executed from: + * + * - An **AWS Lambda function** configured to run inside the same VPC + * - An **EC2 instance** or **ECS task** running in the same VPC + * - A connected environment such as a **VPN**, **AWS Direct Connect**, or a **peered VPC** + * + * To see an example, see Creating an AWS Lambda function that queries Neptune graph data within the VPC + * in the AWS Code Library. + * + */ +public class GremlinProfileQueryExample { + + // Specify the endpoint. You can obtain an endpoint by running + // the main scenario. + private static final String NEPTUNE_ENDPOINT = "https://:8182"; + + public static void main(String[] args) { + NeptunedataClient client = NeptunedataClient.builder() + .credentialsProvider(DefaultCredentialsProvider.create()) + .region(Region.US_EAST_1) + .endpointOverride(URI.create(NEPTUNE_ENDPOINT)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + + try { + executeGremlinProfileQuery(client); + } catch (NeptunedataException e) { + System.err.println("Neptune error: " + e.awsErrorDetails().errorMessage()); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + } finally { + client.close(); + } + } + + // snippet-start:[neptune.java2.data.query.gremlin.profile.main] + /** + * Executes a Gremlin PROFILE query using the provided NeptunedataClient. + * + * @param client The NeptunedataClient instance to be used for executing the Gremlin PROFILE query. 
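+     *               The client is expected to be configured as in {@code main}, with
+     *               {@code endpointOverride} pointing at the cluster's HTTPS endpoint.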
+ */ + private static void executeGremlinProfileQuery(NeptunedataClient client) { + System.out.println("Executing Gremlin PROFILE query..."); + + ExecuteGremlinProfileQueryRequest request = ExecuteGremlinProfileQueryRequest.builder() + .gremlinQuery("g.V().has('code', 'ANC')") + .build(); + + ExecuteGremlinProfileQueryResponse response = client.executeGremlinProfileQuery(request); + if (response.output() != null) { + System.out.println("Query Profile Output:"); + System.out.println(response.output()); + } else { + System.out.println("No output returned from the profile query."); + } + } + // snippet-end:[neptune.java2.data.query.gremlin.profile.main] +} \ No newline at end of file diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinExplainAndProfileExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinExplainAndProfileExample.java new file mode 100644 index 00000000000..87e8ebbaea8 --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinExplainAndProfileExample.java @@ -0,0 +1,117 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.database; + +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinExplainQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinExplainQueryResponse; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinProfileQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinProfileQueryResponse; +import software.amazon.awssdk.services.neptunedata.model.NeptunedataException; +import java.net.URI; +import java.time.Duration; + +/** + * This example demonstrates how to run a Gremlin Explain and Profile query on an Amazon Neptune database + * using the AWS SDK for Java V2. + * + * VPC NETWORKING REQUIREMENT: + * ---------------------------------------------------------------------- + * Amazon Neptune must be accessed from **within the same VPC** as the Neptune cluster. + * It does not expose a public endpoint, so this code must be executed from: + * + * - An **AWS Lambda function** configured to run inside the same VPC + * - An **EC2 instance** or **ECS task** running in the same VPC + * - A connected environment such as a **VPN**, **AWS Direct Connect**, or a **peered VPC** + * + * To see an example, see Creating an AWS Lambda function that queries Neptune graph data within the VPC + * in the AWS Code Library. + * + */ +public class NeptuneGremlinExplainAndProfileExample { + // Specify the endpoint. You can obtain an endpoint by running + // the main scenario. 
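+    // A hypothetical endpoint value (for illustration only) looks like:
+    // https://my-cluster.cluster-abc123example.us-east-1.neptune.amazonaws.com:8182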
+ private static final String NEPTUNE_ENDPOINT = "https://[Specify-Your-Endpoint]:8182"; + + public static void main(String[] args) { + NeptunedataClient client = NeptunedataClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(NEPTUNE_ENDPOINT)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + + executeGremlinExplainQuery(client); + } + + /** + * Executes a Gremlin explain query and a Gremlin profile query using the provided Neptune data client. + * + * @param client the Neptune data client to use for executing the Gremlin queries + * @throws NeptunedataException if an error occurs while executing the Gremlin queries on the Neptune data client + * @throws Exception if an unexpected error occurs during the execution + */ + public static void executeGremlinExplainQuery(NeptunedataClient client) { + try { + runExplainQuery(client); + runProfileQuery(client); + } catch (NeptunedataException e) { + System.err.println("Neptune error: " + e.awsErrorDetails().errorMessage()); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + } finally { + client.close(); + } + } + + /** + * Runs an EXPLAIN query on the Neptune graph database using the provided NeptunedataClient. + * + * @param client The NeptunedataClient instance to use for executing the EXPLAIN query. + */ + private static void runExplainQuery(NeptunedataClient client) { + System.out.println("Running Gremlin EXPLAIN query..."); + ExecuteGremlinExplainQueryRequest explainRequest = ExecuteGremlinExplainQueryRequest.builder() + .gremlinQuery("g.V().has('code', 'ANC')") + .build(); + + ExecuteGremlinExplainQueryResponse explainResponse = client.executeGremlinExplainQuery(explainRequest); + + System.out.println("Explain Query Result:"); + if (explainResponse.output() != null) { + System.out.println(explainResponse.output()); + } else { + System.out.println("No explain output returned."); + } + } + + /** + * Runs a Gremlin PROFILE query using the provided NeptunedataClient instance. + * + * @param client the NeptunedataClient instance to use for executing the Gremlin query + */ + private static void runProfileQuery(NeptunedataClient client) { + System.out.println("Running Gremlin PROFILE query..."); + + ExecuteGremlinProfileQueryRequest profileRequest = ExecuteGremlinProfileQueryRequest.builder() + .gremlinQuery("g.V().has('code', 'ANC')") + .build(); + + ExecuteGremlinProfileQueryResponse profileResponse = client.executeGremlinProfileQuery(profileRequest); + + System.out.println("Profile Query Result:"); + if (profileResponse.output() != null) { + System.out.println(profileResponse.output()); + } else { + System.out.println("No profile output returned."); + } + } +} diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinQueryExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinQueryExample.java new file mode 100644 index 00000000000..4da409d8474 --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/NeptuneGremlinQueryExample.java @@ -0,0 +1,86 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.database; + +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryResponse; +import software.amazon.awssdk.services.neptunedata.model.NeptunedataException; +import java.net.URI; +import java.time.Duration; + +/** + * This example demonstrates how to execute a Gremlin query on an Amazon Neptune database using the AWS SDK for Java V2. + * + * VPC NETWORKING REQUIREMENT: + * ---------------------------------------------------------------------- + * Amazon Neptune must be accessed from **within the same VPC** as the Neptune cluster. + * It does not expose a public endpoint, so this code must be executed from: + * + * - An **AWS Lambda function** configured to run inside the same VPC + * - An **EC2 instance** or **ECS task** running in the same VPC + * - A connected environment such as a **VPN**, **AWS Direct Connect**, or a **peered VPC** + * + * To see an example, see Creating an AWS Lambda function that queries Neptune graph data within the VPC + * in the AWS Code Library. + * + */ + +public class NeptuneGremlinQueryExample { + + public static void main(String[] args) { + // Specify the endpoint. You can obtain an endpoint by running + // the main scenario. + String neptuneEndpoint = "https://[Specify Endpoint]:8182"; + + NeptunedataClient client = NeptunedataClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(neptuneEndpoint)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + } + + // snippet-start:[neptune.java2.data.query.gremlin.main] + /** + * Executes a Gremlin query against an Amazon Neptune database using the provided {@link NeptunedataClient}. 
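+     * The full service response is printed first, followed by the query result when one is returned.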
+ * + * @param client the {@link NeptunedataClient} instance to use for executing the Gremlin query + */ + public static void executeGremlinQuery(NeptunedataClient client) { + try { + System.out.println("Querying Neptune..."); + ExecuteGremlinQueryRequest request = ExecuteGremlinQueryRequest.builder() + .gremlinQuery("g.V().has('code', 'ANC')") + .build(); + + ExecuteGremlinQueryResponse response = client.executeGremlinQuery(request); + + System.out.println("Full Response:"); + System.out.println(response); + + // Retrieve and print the result + if (response.result() != null) { + System.out.println("Query Result:"); + System.out.println(response.result().toString()); + } else { + System.out.println("No result returned from the query."); + } + } catch (NeptunedataException e) { + System.err.println("Error calling Neptune: " + e.awsErrorDetails().errorMessage()); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + } finally { + client.close(); + } + } + // snippet-end:[neptune.java2.data.query.gremlin.main] +} diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/database/OpenCypherExplainExample.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/OpenCypherExplainExample.java new file mode 100644 index 00000000000..51c8af264dd --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/database/OpenCypherExplainExample.java @@ -0,0 +1,84 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.database; + +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteOpenCypherExplainQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteOpenCypherExplainQueryResponse; +import software.amazon.awssdk.services.neptunedata.model.NeptunedataException; +import java.net.URI; +import java.time.Duration; + +/** + * Example: Running an OpenCypher EXPLAIN query on Amazon Neptune using AWS SDK for Java V2. + * + * ------------------------------------------------------------------------------ + * VPC NETWORKING REQUIREMENT: + * ------------------------------------------------------------------------------ + * Amazon Neptune must be accessed from **within the same VPC** as the Neptune cluster. + * It does not expose a public endpoint, so this code must be executed from: + * + * - An **AWS Lambda function** configured to run inside the same VPC + * - An **EC2 instance** or **ECS task** running in the same VPC + * - A connected environment such as a **VPN**, **AWS Direct Connect**, or a **peered VPC** + * + * To see an example, see Creating an AWS Lambda function that queries Neptune graph data within the VPC + * in the AWS Code Library. 
+ * + */ +public class OpenCypherExplainExample { + + private static final String NEPTUNE_ENDPOINT = "https://:8182"; + + public static void main(String[] args) { + NeptunedataClient client = NeptunedataClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(NEPTUNE_ENDPOINT)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + + executeGremlinQuery(client); + } + + // snippet-start:[neptune.java2.data.query.opencypher.main] + /** + * Executes an OpenCypher EXPLAIN query using the provided Neptune data client. + * + * @param client The Neptune data client to use for the query execution. + */ + public static void executeGremlinQuery(NeptunedataClient client) { + try { + System.out.println("Executing OpenCypher EXPLAIN query..."); + ExecuteOpenCypherExplainQueryRequest request = ExecuteOpenCypherExplainQueryRequest.builder() + .openCypherQuery("MATCH (n {code: 'ANC'}) RETURN n") + .explainMode("debug") + .build(); + + ExecuteOpenCypherExplainQueryResponse response = client.executeOpenCypherExplainQuery(request); + + if (response.results() != null) { + System.out.println("Explain Results:"); + System.out.println(response.results().asUtf8String()); + } else { + System.out.println("No explain results returned."); + } + + } catch (NeptunedataException e) { + System.err.println("Neptune error: " + e.awsErrorDetails().errorMessage()); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + } finally { + client.close(); + } + } + // snippet-end:[neptune.java2.data.query.opencypher.main] +} diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneActions.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneActions.java new file mode 100644 index 00000000000..4e2f353b230 --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneActions.java @@ -0,0 +1,592 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.scenerio; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsRequest; +import software.amazon.awssdk.services.ec2.model.DescribeSubnetsResponse; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest; +import software.amazon.awssdk.services.ec2.model.Filter; +import software.amazon.awssdk.services.ec2.model.DescribeVpcsResponse; +import software.amazon.awssdk.services.ec2.model.Subnet; +import software.amazon.awssdk.services.ec2.model.Vpc; +import software.amazon.awssdk.services.neptune.NeptuneAsyncClient; +import software.amazon.awssdk.services.neptune.NeptuneClient; +import software.amazon.awssdk.services.neptune.model.*; +import software.amazon.awssdk.services.neptune.model.CreateDbClusterRequest; +import software.amazon.awssdk.services.neptune.model.CreateDbInstanceRequest; +import software.amazon.awssdk.services.neptune.model.CreateDbSubnetGroupRequest; +import software.amazon.awssdk.services.neptune.model.DBCluster; +import software.amazon.awssdk.services.neptune.model.DBInstance; +import software.amazon.awssdk.services.neptune.model.DeleteDbClusterRequest; +import software.amazon.awssdk.services.neptune.model.DeleteDbInstanceRequest; +import software.amazon.awssdk.services.neptune.model.DeleteDbSubnetGroupRequest; +import software.amazon.awssdk.services.neptune.model.DescribeDbInstancesRequest; +import software.amazon.awssdk.services.neptune.model.DescribeDbClustersRequest; +import software.amazon.awssdk.services.neptunegraph.model.ServiceQuotaExceededException; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +// snippet-start:[neptune.java2.actions.main] +public class NeptuneActions { + private CompletableFuture instanceCheckFuture; + private static NeptuneAsyncClient neptuneAsyncClient; + private final Region region = Region.US_EAST_1; + private static final Logger logger = LoggerFactory.getLogger(NeptuneActions.class); + private final NeptuneClient neptuneClient = NeptuneClient.builder().region(region).build(); + + /** + * Retrieves an instance of the NeptuneAsyncClient. + *

+     * This method initializes and returns a singleton instance of the NeptuneAsyncClient. The client
+     * is configured with the following settings:
+     * <ul>
+     *   <li>Maximum concurrency: 100</li>
+     *   <li>Connection timeout: 60 seconds</li>
+     *   <li>Read timeout: 60 seconds</li>
+     *   <li>Write timeout: 60 seconds</li>
+     *   <li>API call timeout: 2 minutes</li>
+     *   <li>API call attempt timeout: 90 seconds</li>
+     *   <li>Retry strategy: STANDARD</li>
+     * </ul>
+     *
+ * The client is built using the NettyNioAsyncHttpClient. + * + * @return the singleton instance of the NeptuneAsyncClient + */ + private static NeptuneAsyncClient getAsyncClient() { + if (neptuneAsyncClient == null) { + SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder() + .maxConcurrency(100) + .connectionTimeout(Duration.ofSeconds(60)) + .readTimeout(Duration.ofSeconds(60)) + .writeTimeout(Duration.ofSeconds(60)) + .build(); + + ClientOverrideConfiguration overrideConfig = ClientOverrideConfiguration.builder() + .apiCallTimeout(Duration.ofMinutes(2)) + .apiCallAttemptTimeout(Duration.ofSeconds(90)) + .retryStrategy(RetryMode.STANDARD) + .build(); + + neptuneAsyncClient = NeptuneAsyncClient.builder() + .httpClient(httpClient) + .overrideConfiguration(overrideConfig) + .build(); + } + return neptuneAsyncClient; + } + + /** + * Asynchronously deletes a set of Amazon Neptune resources in a defined order. + *

+     * The method performs the following operations in sequence:
+     * <ol>
+     *   <li>Deletes the Neptune DB instance identified by {@code dbInstanceId}.</li>
+     *   <li>Waits until the DB instance is fully deleted.</li>
+     *   <li>Deletes the Neptune DB cluster identified by {@code dbClusterId}.</li>
+     *   <li>Deletes the Neptune DB subnet group identified by {@code subnetGroupName}.</li>
+     * </ol>
+     *
+ * If any step fails, the subsequent operations are not performed, and the exception + * is logged. This method blocks the calling thread until all operations complete. + * + * @param dbInstanceId the ID of the Neptune DB instance to delete + * @param dbClusterId the ID of the Neptune DB cluster to delete + * @param subnetGroupName the name of the Neptune DB subnet group to delete + */ + public void deleteNeptuneResourcesAsync(String dbInstanceId, String dbClusterId, String subnetGroupName) { + deleteDBInstanceAsync(dbInstanceId) + .thenCompose(v -> waitUntilInstanceDeletedAsync(dbInstanceId)) + .thenCompose(v -> deleteDBClusterAsync(dbClusterId)) + .thenCompose(v -> deleteDBSubnetGroupAsync(subnetGroupName)) + .whenComplete((v, ex) -> { + if (ex != null) { + logger.info("Failed to delete Neptune resources: " + ex.getMessage()); + } else { + logger.info("Neptune resources deleted successfully."); + } + }) + .join(); // Waits for the entire async chain to complete + } + + // snippet-start:[neptune.java2.delete.subnet.group.main] + /** + * Deletes a subnet group. + * + * @param subnetGroupName the identifier of the subnet group to delete + * @return a {@link CompletableFuture} that completes when the cluster has been deleted + */ + public CompletableFuture deleteDBSubnetGroupAsync(String subnetGroupName) { + DeleteDbSubnetGroupRequest request = DeleteDbSubnetGroupRequest.builder() + .dbSubnetGroupName(subnetGroupName) + .build(); + + return getAsyncClient().deleteDBSubnetGroup(request) + .thenAccept(response -> logger.info("🗑️ Deleting Subnet Group: " + subnetGroupName)); + } + // snippet-end:[neptune.java2.delete.subnet.group.main] + + // snippet-start:[neptune.java2.delete.cluster.main] + /** + * Deletes a DB instance asynchronously. + * + * @param clusterId the identifier of the cluster to delete + * @return a {@link CompletableFuture} that completes when the cluster has been deleted + */ + public CompletableFuture deleteDBClusterAsync(String clusterId) { + DeleteDbClusterRequest request = DeleteDbClusterRequest.builder() + .dbClusterIdentifier(clusterId) + .skipFinalSnapshot(true) + .build(); + + return getAsyncClient().deleteDBCluster(request) + .thenAccept(response -> System.out.println("🗑️ Deleting DB Cluster: " + clusterId)); + } + // snippet-end:[neptune.java2.delete.cluster.main] + + public CompletableFuture waitUntilInstanceDeletedAsync(String instanceId) { + CompletableFuture future = new CompletableFuture<>(); + long startTime = System.currentTimeMillis(); + checkInstanceDeletedRecursive(instanceId, startTime, future); + return future; + } + + // snippet-start:[neptune.java2.delete.instance.main] + /** + * Deletes a DB instance asynchronously. 
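+     * The instance is removed without a final snapshot ({@code skipFinalSnapshot(true)}).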
+ * + * @param instanceId the identifier of the DB instance to be deleted + * @return a {@link CompletableFuture} that completes when the DB instance has been deleted + */ + public CompletableFuture deleteDBInstanceAsync(String instanceId) { + DeleteDbInstanceRequest request = DeleteDbInstanceRequest.builder() + .dbInstanceIdentifier(instanceId) + .skipFinalSnapshot(true) + .build(); + + return getAsyncClient().deleteDBInstance(request) + .thenAccept(response -> System.out.println("🗑️ Deleting DB Instance: " + instanceId)); + } + // snippet-end:[neptune.java2.delete.instance.main] + + + private void checkInstanceDeletedRecursive(String instanceId, long startTime, CompletableFuture future) { + DescribeDbInstancesRequest request = DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(instanceId) + .build(); + + getAsyncClient().describeDBInstances(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof NeptuneException && + ((NeptuneException) cause).awsErrorDetails().errorCode().equals("DBInstanceNotFound")) { + long elapsed = (System.currentTimeMillis() - startTime) / 1000; + logger.info("\r Instance %s deleted after %ds%n", instanceId, elapsed); + future.complete(null); + return; + } + future.completeExceptionally(new CompletionException("Error polling DB instance", cause)); + return; + } + + String status = response.dbInstances().get(0).dbInstanceStatus(); + long elapsed = (System.currentTimeMillis() - startTime) / 1000; + System.out.printf("\r Waiting: Instance %s status: %-10s (%ds elapsed)", instanceId, status, elapsed); + System.out.flush(); + + CompletableFuture.delayedExecutor(20, TimeUnit.SECONDS) + .execute(() -> checkInstanceDeletedRecursive(instanceId, startTime, future)); + }); + } + + + public void waitForClusterStatus(String clusterId, String desiredStatus) { + System.out.printf("Waiting for cluster '%s' to reach status '%s'...\n", clusterId, desiredStatus); + CompletableFuture future = new CompletableFuture<>(); + checkClusterStatusRecursive(clusterId, desiredStatus, System.currentTimeMillis(), future); + future.join(); + } + + private void checkClusterStatusRecursive(String clusterId, String desiredStatus, long startTime, CompletableFuture future) { + DescribeDbClustersRequest request = DescribeDbClustersRequest.builder() + .dbClusterIdentifier(clusterId) + .build(); + + getAsyncClient().describeDBClusters(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + future.completeExceptionally( + new CompletionException("Error checking Neptune cluster status", cause) + ); + return; + } + + List clusters = response.dbClusters(); + if (clusters.isEmpty()) { + future.completeExceptionally(new RuntimeException("Cluster not found: " + clusterId)); + return; + } + + String currentStatus = clusters.get(0).status(); + long elapsedSeconds = (System.currentTimeMillis() - startTime) / 1000; + System.out.printf("\r Elapsed: %-20s Cluster status: %-20s", formatElapsedTime((int) elapsedSeconds), currentStatus); + System.out.flush(); + + if (desiredStatus.equalsIgnoreCase(currentStatus)) { + System.out.printf("\r Neptune cluster reached desired status '%s' after %s.\n", desiredStatus, formatElapsedTime((int) elapsedSeconds)); + future.complete(null); + } else { + CompletableFuture.delayedExecutor(20, TimeUnit.SECONDS) + .execute(() -> checkClusterStatusRecursive(clusterId, desiredStatus, startTime, future)); + } + }); + } + + + // 
snippet-start:[neptune.java2.start.cluster.main] + /** + * Starts an Amazon Neptune DB cluster. + * + * @param clusterIdentifier the unique identifier of the DB cluster to be stopped + */ + public CompletableFuture startDBClusterAsync(String clusterIdentifier) { + StartDbClusterRequest clusterRequest = StartDbClusterRequest.builder() + .dbClusterIdentifier(clusterIdentifier) + .build(); + + return getAsyncClient().startDBCluster(clusterRequest) + .whenComplete((response, error) -> { + if (error != null) { + Throwable cause = error.getCause() != null ? error.getCause() : error; + + if (cause instanceof ResourceNotFoundException) { + throw (ResourceNotFoundException) cause; + } + + throw new RuntimeException("Failed to start DB cluster: " + cause.getMessage(), cause); + } else { + logger.info("DB Cluster starting: " + clusterIdentifier); + } + }); + } + // snippet-end:[neptune.java2.start.cluster.main] + + // snippet-start:[neptune.java2.stop.cluster.main] + /** + * Stops an Amazon Neptune DB cluster. + * + * @param clusterIdentifier the unique identifier of the DB cluster to be stopped + */ + public CompletableFuture stopDBClusterAsync(String clusterIdentifier) { + StopDbClusterRequest clusterRequest = StopDbClusterRequest.builder() + .dbClusterIdentifier(clusterIdentifier) + .build(); + + return getAsyncClient().stopDBCluster(clusterRequest) + .whenComplete((response, error) -> { + if (error != null) { + Throwable cause = error.getCause() != null ? error.getCause() : error; + + if (cause instanceof ResourceNotFoundException) { + throw (ResourceNotFoundException) cause; + } + + throw new RuntimeException("Failed to stop DB cluster: " + cause.getMessage(), cause); + } else { + logger.info("DB Cluster stopped: " + clusterIdentifier); + } + }); + } + + // snippet-end:[neptune.java2.stop.cluster.main] + + // snippet-start:[neptune.java2.describe.cluster.main] + + /** + * Asynchronously describes the specified Amazon RDS DB cluster. 
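+     * Key cluster attributes, including status, engine version, endpoints, security groups, and
+     * backup settings, are written to the log.
+     * <p>A minimal usage sketch (the cluster identifier is a placeholder):
+     * <pre>{@code
+     * new NeptuneActions().describeDBClustersAsync("my-neptune-cluster").join();
+     * }</pre>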
+ * + * @param clusterId the identifier of the DB cluster to describe + * @return a {@link CompletableFuture} that completes when the operation is done, or throws a {@link RuntimeException} + * if an error occurs + */ + public CompletableFuture describeDBClustersAsync(String clusterId) { + DescribeDbClustersRequest request = DescribeDbClustersRequest.builder() + .dbClusterIdentifier(clusterId) + .build(); + + return getAsyncClient().describeDBClusters(request) + .thenAccept(response -> { + for (DBCluster cluster : response.dbClusters()) { + logger.info("Cluster Identifier: " + cluster.dbClusterIdentifier()); + logger.info("Status: " + cluster.status()); + logger.info("Engine: " + cluster.engine()); + logger.info("Engine Version: " + cluster.engineVersion()); + logger.info("Endpoint: " + cluster.endpoint()); + logger.info("Reader Endpoint: " + cluster.readerEndpoint()); + logger.info("Availability Zones: " + cluster.availabilityZones()); + logger.info("Subnet Group: " + cluster.dbSubnetGroup()); + logger.info("VPC Security Groups:"); + cluster.vpcSecurityGroups().forEach(vpcGroup -> + logger.info(" - " + vpcGroup.vpcSecurityGroupId())); + logger.info("Storage Encrypted: " + cluster.storageEncrypted()); + logger.info("IAM DB Auth Enabled: " + cluster.iamDatabaseAuthenticationEnabled()); + logger.info("Backup Retention Period: " + cluster.backupRetentionPeriod() + " days"); + logger.info("Preferred Backup Window: " + cluster.preferredBackupWindow()); + logger.info("Preferred Maintenance Window: " + cluster.preferredMaintenanceWindow()); + logger.info("------"); + } + }) + .exceptionally(ex -> { + Throwable cause = ex.getCause() != null ? ex.getCause() : ex; + + if (cause instanceof ResourceNotFoundException) { + throw (ResourceNotFoundException) cause; + } + + throw new RuntimeException("Failed to describe the DB cluster: " + cause.getMessage(), cause); + }); + } + // snippet-end:[neptune.java2.describe.cluster.main] + + + public CompletableFuture checkInstanceStatus(String instanceId, String desiredStatus) { + CompletableFuture future = new CompletableFuture<>(); + long startTime = System.currentTimeMillis(); + checkStatusRecursive(instanceId, desiredStatus.toLowerCase(), startTime, future); + return future; + } + + // snippet-start:[neptune.java2.describe.dbinstance.main] + /** + * Checks the status of a Neptune instance recursively until the desired status is reached or a timeout occurs. 
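+     * The status is polled with {@code describeDBInstances} every 20 seconds, and the elapsed
+     * time is printed on each check.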
+ * + * @param instanceId the ID of the Neptune instance to check + * @param desiredStatus the desired status of the Neptune instance + * @param startTime the start time of the operation, used to calculate the elapsed time + * @param future a {@link CompletableFuture} that will be completed when the desired status is reached + */ + private void checkStatusRecursive(String instanceId, String desiredStatus, long startTime, CompletableFuture future) { + DescribeDbInstancesRequest request = DescribeDbInstancesRequest.builder() + .dbInstanceIdentifier(instanceId) + .build(); + + getAsyncClient().describeDBInstances(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + future.completeExceptionally( + new CompletionException("Error checking Neptune instance status", cause) + ); + return; + } + + List instances = response.dbInstances(); + if (instances.isEmpty()) { + future.completeExceptionally(new RuntimeException("Instance not found: " + instanceId)); + return; + } + + String currentStatus = instances.get(0).dbInstanceStatus(); + long elapsedSeconds = (System.currentTimeMillis() - startTime) / 1000; + System.out.printf("\r Elapsed: %-20s Status: %-20s", formatElapsedTime((int) elapsedSeconds), currentStatus); + System.out.flush(); + + if (desiredStatus.equalsIgnoreCase(currentStatus)) { + System.out.printf("\r Neptune instance reached desired status '%s' after %s.\n", desiredStatus, formatElapsedTime((int) elapsedSeconds)); + future.complete(null); + } else { + CompletableFuture.delayedExecutor(20, TimeUnit.SECONDS) + .execute(() -> checkStatusRecursive(instanceId, desiredStatus, startTime, future)); + } + }); + } + // snippet-end:[neptune.java2.describe.dbinstance.main] + + + private String formatElapsedTime(int seconds) { + int minutes = seconds / 60; + int remainingSeconds = seconds % 60; + + if (minutes > 0) { + return minutes + (minutes == 1 ? " min" : " mins") + ", " + + remainingSeconds + (remainingSeconds == 1 ? " sec" : " secs"); + } else { + return remainingSeconds + (remainingSeconds == 1 ? " sec" : " secs"); + } + } + + // snippet-start:[neptune.java2.create.dbinstance.main] + + /** + * Creates a new Amazon Neptune DB instance asynchronously. 
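+     * The instance is created with the {@code db.r5.large} instance class and joined to the
+     * specified DB cluster.
+     * <p>A minimal usage sketch (both identifiers are placeholders):
+     * <pre>{@code
+     * String instanceId = new NeptuneActions()
+     *         .createDBInstanceAsync("my-neptune-instance", "my-neptune-cluster")
+     *         .join();
+     * }</pre>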
+ * + * @param dbInstanceId the identifier for the new DB instance + * @param dbClusterId the identifier for the DB cluster that the new instance will be a part of + * @return a {@link CompletableFuture} that completes with the identifier of the newly created DB instance + * @throws CompletionException if the operation fails, with a cause of either: + * - {@link ServiceQuotaExceededException} if the request would exceed the maximum quota, or + * - a general exception with the failure message + */ + public CompletableFuture createDBInstanceAsync(String dbInstanceId, String dbClusterId) { + CreateDbInstanceRequest request = CreateDbInstanceRequest.builder() + .dbInstanceIdentifier(dbInstanceId) + .dbInstanceClass("db.r5.large") + .engine("neptune") + .dbClusterIdentifier(dbClusterId) + .build(); + + return getAsyncClient().createDBInstance(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + throw new CompletionException("The operation was denied because the request would exceed the maximum quota.", cause); + } + throw new CompletionException("Failed to create Neptune DB instance: " + exception.getMessage(), exception); + } + }) + .thenApply(response -> { + String instanceId = response.dbInstance().dbInstanceIdentifier(); + logger.info("Created Neptune DB Instance: " + instanceId); + return instanceId; + }); + } + // snippet-end:[neptune.java2.create.dbinstance.main] + + // snippet-start:[neptune.java2.create.cluster.main] + + /** + * Creates a new Amazon Neptune DB cluster asynchronously. + * + * @param dbName the name of the DB cluster to be created + * @return a CompletableFuture that, when completed, provides the ID of the created DB cluster + * @throws CompletionException if the operation fails for any reason, including if the request would exceed the maximum quota + */ + public CompletableFuture createDBClusterAsync(String dbName) { + CreateDbClusterRequest request = CreateDbClusterRequest.builder() + .dbClusterIdentifier(dbName) + .engine("neptune") + .deletionProtection(false) + .backupRetentionPeriod(1) + .build(); + + return getAsyncClient().createDBCluster(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + throw new CompletionException("The operation was denied because the request would exceed the maximum quota.", cause); + } + throw new CompletionException("Failed to create Neptune DB cluster: " + exception.getMessage(), exception); + } + }) + .thenApply(response -> { + String clusterId = response.dbCluster().dbClusterIdentifier(); + logger.info("DB Cluster created: " + clusterId); + return clusterId; + }); + } + // snippet-end:[neptune.java2.create.cluster.main] + + // snippet-start:[neptune.java2.create.subnet.main] + + /** + * Creates a new DB subnet group asynchronously. 
+ * + * @param groupName the name of the subnet group to create + * @return a CompletableFuture that, when completed, returns the Amazon Resource Name (ARN) of the created subnet group + * @throws CompletionException if the operation fails, with a cause that may be a ServiceQuotaExceededException if the request would exceed the maximum quota + */ + public CompletableFuture createSubnetGroupAsync(String groupName) { + + // Get the Amazon Virtual Private Cloud (VPC) where the Neptune cluster and resources will be created + String vpcId = getDefaultVpcId(); + logger.info("VPC is : " + vpcId); + + List subnetList = getSubnetIds(vpcId); + for (String subnetId : subnetList) { + System.out.println("Subnet group:" +subnetId); + } + + CreateDbSubnetGroupRequest request = CreateDbSubnetGroupRequest.builder() + .dbSubnetGroupName(groupName) + .dbSubnetGroupDescription("Subnet group for Neptune cluster") + .subnetIds(subnetList) + .build(); + + return getAsyncClient().createDBSubnetGroup(request) + .whenComplete((response, exception) -> { + if (exception != null) { + Throwable cause = exception.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + throw new CompletionException("The operation was denied because the request would exceed the maximum quota.", cause); + } + throw new CompletionException("Failed to create subnet group: " + exception.getMessage(), exception); + } + }) + .thenApply(response -> { + String name = response.dbSubnetGroup().dbSubnetGroupName(); + String arn = response.dbSubnetGroup().dbSubnetGroupArn(); + logger.info("Subnet group created: " + name); + return arn; + }); + } + // snippet-end:[neptune.java2.create.subnet.main] + + private List getSubnetIds(String vpcId) { + try (Ec2Client ec2 = Ec2Client.builder().region(region).build()) { + DescribeSubnetsRequest request = DescribeSubnetsRequest.builder() + .filters(builder -> builder.name("vpc-id").values(vpcId)) + .build(); + + DescribeSubnetsResponse response = ec2.describeSubnets(request); + return response.subnets().stream() + .map(Subnet::subnetId) + .collect(Collectors.toList()); + } + } + + public static String getDefaultVpcId() { + Ec2Client ec2 = Ec2Client.builder() + .region(Region.US_EAST_1) + .build(); + + Filter myFilter = Filter.builder() + .name("isDefault") + .values("true") + .build(); + + List filterList = new ArrayList<>(); + filterList.add(myFilter); + + DescribeVpcsRequest request = DescribeVpcsRequest.builder() + .filters(filterList) + .build(); + + + DescribeVpcsResponse response = ec2.describeVpcs(request); + if (!response.vpcs().isEmpty()) { + Vpc defaultVpc = response.vpcs().get(0); + return defaultVpc.vpcId(); + } else { + throw new RuntimeException("No default VPC found in this region."); + } + } +} +// snippet-end:[neptune.java2.actions.main] \ No newline at end of file diff --git a/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneScenario.java b/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneScenario.java new file mode 100644 index 00000000000..e61efdff508 --- /dev/null +++ b/javav2/example_code/neptune/src/main/java/com/example/neptune/scenerio/NeptuneScenario.java @@ -0,0 +1,252 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.example.neptune.scenerio; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.services.neptunegraph.model.ResourceNotFoundException; +import software.amazon.awssdk.services.neptunegraph.model.ServiceQuotaExceededException; + +import java.util.Scanner; +import java.util.concurrent.CompletionException; + +// snippet-start:[neptune.java2.scenario.main] +public class NeptuneScenario { + public static final String DASHES = new String(new char[80]).replace("\0", "-"); + private static final Logger logger = LoggerFactory.getLogger(NeptuneScenario.class); + static Scanner scanner = new Scanner(System.in); + static NeptuneActions neptuneActions = new NeptuneActions(); + + public static void main(String[] args) { + final String usage = + """ + Usage: + + + Where: + subnetGroupName - The name of an existing Neptune DB subnet group that includes subnets in at least two Availability Zones. + clusterName - The unique identifier for the Neptune DB cluster. + dbInstanceId - The identifier for a specific Neptune DB instance within the cluster. + """; + String subnetGroupName = "neptuneSubnetGroup65"; + String clusterName = "neptuneCluster65"; + String dbInstanceId = "neptuneDB65"; + + logger.info(""" + Amazon Neptune is a fully managed graph + database service by AWS, designed specifically + for handling complex relationships and connected + datasets at scale. It supports two popular graph models: + property graphs (via openCypher and Gremlin) and RDF + graphs (via SPARQL). This makes Neptune ideal for + use cases such as knowledge graphs, fraud detection, + social networking, recommendation engines, and + network management, where relationships between + entities are central to the data. + + Being fully managed, Neptune handles database + provisioning, patching, backups, and replication, + while also offering high availability and durability + within AWS's infrastructure. + + For developers, programming with Neptune allows + for building intelligent, relationship-aware + applications that go beyond traditional tabular + databases. Developers can use the AWS SDK for Java + to automate infrastructure operations (via NeptuneClient). + + Let's get started... + """); + waitForInputToContinue(scanner); + runScenario(subnetGroupName, dbInstanceId, clusterName); + } + + public static void runScenario(String subnetGroupName, String dbInstanceId, String clusterName) { + logger.info(DASHES); + logger.info("1. Create a Neptune DB Subnet Group"); + logger.info("The Neptune DB subnet group is used when launching a Neptune cluster"); + waitForInputToContinue(scanner); + try { + neptuneActions.createSubnetGroupAsync(subnetGroupName).join(); + + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + logger.error("The request failed due to service quota exceeded: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("2. 
Create a Neptune Cluster"); + logger.info("A Neptune Cluster allows you to store and query highly connected datasets with low latency."); + waitForInputToContinue(scanner); + String dbClusterId; + try { + dbClusterId = neptuneActions.createDBClusterAsync(clusterName).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + logger.error("The request failed due to service quota exceeded: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("3. Create a Neptune DB Instance"); + logger.info("In this step, we add a new database instance to the Neptune cluster"); + waitForInputToContinue(scanner); + try { + neptuneActions.createDBInstanceAsync(dbInstanceId, dbClusterId).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ServiceQuotaExceededException) { + logger.error("The request failed due to service quota exceeded: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("4. Check the status of the Neptune DB Instance"); + logger.info(""" + In this step, we will wait until the DB instance + becomes available. This may take around 10 minutes. + """); + waitForInputToContinue(scanner); + try { + neptuneActions.checkInstanceStatus(dbInstanceId, "available").join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + logger.error("An unexpected error occurred.", cause); + return; + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("5.Show Neptune Cluster details"); + waitForInputToContinue(scanner); + try { + neptuneActions.describeDBClustersAsync(clusterName).join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ResourceNotFoundException) { + logger.error("The request failed due to the resource not found: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("6. Stop the Amazon Neptune cluster"); + logger.info(""" + Once stopped, this step polls the status + until the cluster is in a stopped state. + """); + waitForInputToContinue(scanner); + try { + neptuneActions.stopDBClusterAsync(dbClusterId); + neptuneActions.waitForClusterStatus(dbClusterId, "stopped"); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ResourceNotFoundException) { + logger.error("The request failed due to the resource not found: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info("7. Start the Amazon Neptune cluster"); + logger.info(""" + Once started, this step polls the clusters + status until it's in an available state. + We will also poll the instance status. 
+ """); + waitForInputToContinue(scanner); + try { + neptuneActions.startDBClusterAsync(dbClusterId); + neptuneActions.waitForClusterStatus(dbClusterId, "available"); + neptuneActions.checkInstanceStatus(dbInstanceId, "available").join(); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ResourceNotFoundException) { + logger.error("The request failed due to the resource not found: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + logger.info(DASHES); + + logger.info(DASHES); + logger.info("8. Delete the Neptune Assets"); + logger.info("Would you like to delete the Neptune Assets? (y/n)"); + String delAns = scanner.nextLine().trim(); + if (delAns.equalsIgnoreCase("y")) { + logger.info("You selected to delete the Neptune assets."); + try { + neptuneActions.deleteNeptuneResourcesAsync(dbInstanceId, clusterName, subnetGroupName); + } catch (CompletionException ce) { + Throwable cause = ce.getCause(); + if (cause instanceof ResourceNotFoundException) { + logger.error("The request failed due to the resource not found: {}", cause.getMessage()); + } else { + logger.error("An unexpected error occurred.", cause); + } + return; + } + } else { + logger.info("You selected not to delete Neptune assets."); + } + waitForInputToContinue(scanner); + logger.info(DASHES); + + logger.info(DASHES); + logger.info( + """ + Thank you for checking out the Amazon Neptune Service Use demo. We hope you + learned something new, or got some inspiration for your own apps today. + For more AWS code examples, have a look at: + https://docs.aws.amazon.com/code-library/latest/ug/what-is-code-library.html + """); + logger.info(DASHES); + } + + private static void waitForInputToContinue(Scanner scanner) { + while (true) { + logger.info(""); + logger.info("Enter 'c' followed by to continue:"); + String input = scanner.nextLine(); + + if (input.trim().equalsIgnoreCase("c")) { + logger.info("Continuing with the program..."); + logger.info(""); + break; + } else { + logger.info("Invalid input. Please try again."); + } + } + } +} +// snippet-end:[neptune.java2.scenario.main] \ No newline at end of file diff --git a/javav2/example_code/neptune/src/main/resources/log4j2.xml b/javav2/example_code/neptune/src/main/resources/log4j2.xml new file mode 100644 index 00000000000..914470047e7 --- /dev/null +++ b/javav2/example_code/neptune/src/main/resources/log4j2.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/javav2/example_code/neptune/src/test/java/NeptuneTest.java b/javav2/example_code/neptune/src/test/java/NeptuneTest.java new file mode 100644 index 00000000000..15fe4a8a4d7 --- /dev/null +++ b/javav2/example_code/neptune/src/test/java/NeptuneTest.java @@ -0,0 +1,106 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +import com.example.neptune.scenerio.NeptuneActions; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; + +import static org.junit.jupiter.api.Assertions.*; + +@TestInstance(TestInstance.Lifecycle.PER_METHOD) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class NeptuneTest { + private static String subnetGroupName = "neptuneSubnetGroupTest" ; + private static String clusterName = "neptuneClusterTest" ; + private static String dbInstanceId = "neptuneDBTest" ; + private static NeptuneActions neptuneActions = new NeptuneActions(); + private static String dbClusterId = ""; + + @Test + @Tag("IntegrationTest") + @Order(1) + public void testCreateSubnetGroup() { + assertDoesNotThrow(() -> { + neptuneActions.createSubnetGroupAsync(subnetGroupName).join(); + }); + System.out.println("Test 1 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(2) + public void testCreateCluster() { + assertDoesNotThrow(() -> { + dbClusterId = neptuneActions.createDBClusterAsync(clusterName).join(); + assertFalse(dbClusterId.trim().isEmpty(), "DB Cluster ID should not be empty"); + }); + System.out.println("Test 2 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(3) + public void testCreateDBInstance() { + assertDoesNotThrow(() -> { + neptuneActions.createDBInstanceAsync(dbInstanceId, dbClusterId).join(); + }); + System.out.println("Test 3 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(4) + public void testCheckInstance() { + assertDoesNotThrow(() -> { + neptuneActions.checkInstanceStatus(dbInstanceId, "available").join(); + }); + System.out.println("Test 4 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(5) + public void testDescribeDBCluster() { + assertDoesNotThrow(() -> { + neptuneActions.describeDBClustersAsync(clusterName).join(); + }); + System.out.println("Test 5 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(6) + public void testStopDBCluster() { + assertDoesNotThrow(() -> { + neptuneActions.stopDBClusterAsync(dbClusterId); + neptuneActions.waitForClusterStatus(dbClusterId,"stopped"); + }); + System.out.println("Test 6 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(7) + public void testStartDBCluster() { + assertDoesNotThrow(() -> { + neptuneActions.startDBClusterAsync(dbClusterId); + neptuneActions.waitForClusterStatus(dbClusterId,"available"); + neptuneActions.checkInstanceStatus(dbInstanceId, "available").join(); + }); + System.out.println("Test 7 passed"); + } + + @Test + @Tag("IntegrationTest") + @Order(8) + public void testDeleteResources() { + assertDoesNotThrow(() -> { + neptuneActions.deleteNeptuneResourcesAsync(dbInstanceId, clusterName, subnetGroupName); + }); + System.out.println("Test 8 passed"); + } +} diff --git a/javav2/usecases/creating_neptune_lambda/README.md b/javav2/usecases/creating_neptune_lambda/README.md new file mode 100644 index 00000000000..e91bb8e63ae --- /dev/null +++ b/javav2/usecases/creating_neptune_lambda/README.md @@ -0,0 +1,339 @@ +# Accessing Neptune Graph Data from Lambda in a VPC Using the AWS SDK for Java + +## Overview + +| Heading | Description | +| ----------- | ----------- | +| Description | Discusses how to develop an AWS Lambda function that queries Amazon Neptune data within the VPC using the AWS SDK for Java (v2). 
| +| Audience | Developer (intermediate) | +| Required skills | Java, Maven | + +This guide provides a step-by-step walkthrough for creating and deploying an AWS Lambda function that queries an Amazon Neptune graph database using the Neptune Data API. + +Amazon Neptune is a fully managed graph database service designed to operate within a Virtual Private Cloud (VPC). Because of this, any Lambda function that needs to access Neptune must also run inside the same VPC and be granted appropriate network and IAM permissions. External access is not supported. + +To ensure secure and reliable communication between Lambda and Neptune, you’ll configure key AWS infrastructure components, including VPC subnets, security groups, and IAM roles. This guide covers all necessary setup and configuration tasks to help you successfully connect your Lambda function to Neptune using the Neptune Data API. + +**Note**: Lambda is a compute service that you can use to run code without provisioning or managing servers. You can create Lambda functions in various programming languages. For more information about Lambda, see +[What is AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + +#### Topics ++ Prerequisites ++ Set Up the Amazon Neptune Cluster and VPC ++ Create an AWS Identity and Access Management (IAM) role that is used to execute Lambda functions ++ Create an IntelliJ project ++ Add the POM dependencies to your project ++ Create a Lambda function by using the Lambda runtime API ++ Package the project that contains the Lambda function ++ Deploy the Lambda function + +## Prerequisites +To follow along with this tutorial, you need the following: ++ An Amazon Neptune DB instance in a VPC. You can get this by running the Neptune Basics scenario located in AWS Code Library. ++ A security group that allows traffic from Lambda to Neptune (typically on port 8182). ++ An AWS account with proper credentials. ++ AWS CLI configured with permissions for Lambda, IAM, EC2 (VPC), S3, Neptune. For information about setting up AWS CLI, see [Setting up the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-quickstart.html) ++ A Java IDE. (For this tutorial, the IntelliJ IDE is used.) ++ Java 21 JDK. ++ Maven 3.6 or higher. + +### Important + ++ The AWS services included in this document are included in the [AWS Free Tier](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc). ++ This code has not been tested in all AWS Regions. Some AWS services are available only in specific Regions. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). ++ Running this code might result in charges to your AWS account. ++ Be sure to delete all of the resources that you create during this tutorial so that you won't be charged. + +## Set Up the Amazon Neptune Cluster and VPC + +Amazon Neptune requires a VPC with at least two subnets in different Availability Zones (AZs) to ensure high availability and fault tolerance. + +If you're unsure which VPC or subnets to use, you can easily generate the required resources by running the Amazon Neptune Basics scenario from the AWS Code Library. This setup will provision: + + - A suitable VPC with subnets in multiple AZs + + - A Neptune DB cluster and instance + + - All necessary networking and security configurations + +This is a quick way to get a working Neptune environment that you can immediately use for this use case. 
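Before moving on, you can optionally confirm that the VPC you plan to use has subnets in at least two Availability Zones. The following is a minimal sketch using the EC2 client from the AWS SDK for Java (v2), and it mirrors the helper logic in the Basics scenario. The `VpcSubnetCheck` class name and the `US_EAST_1` Region are illustrative assumptions, and the sketch assumes a default VPC exists.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.ec2.model.DescribeSubnetsRequest;
import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest;
import software.amazon.awssdk.services.ec2.model.Filter;
import software.amazon.awssdk.services.ec2.model.Subnet;

import java.util.Set;
import java.util.stream.Collectors;

public class VpcSubnetCheck {
    public static void main(String[] args) {
        try (Ec2Client ec2 = Ec2Client.builder().region(Region.US_EAST_1).build()) {
            // Find the default VPC (assumes one exists in this Region).
            String vpcId = ec2.describeVpcs(DescribeVpcsRequest.builder()
                            .filters(Filter.builder().name("isDefault").values("true").build())
                            .build())
                    .vpcs().get(0).vpcId();

            // Collect the Availability Zones covered by the subnets in that VPC.
            DescribeSubnetsRequest request = DescribeSubnetsRequest.builder()
                    .filters(Filter.builder().name("vpc-id").values(vpcId).build())
                    .build();

            Set<String> zones = ec2.describeSubnets(request).subnets().stream()
                    .map(Subnet::availabilityZone)
                    .collect(Collectors.toSet());

            System.out.println("Default VPC: " + vpcId);
            System.out.println("Availability Zones covered: " + zones);
            if (zones.size() < 2) {
                System.out.println("Warning: Neptune requires subnets in at least two Availability Zones.");
            }
        }
    }
}
```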
+
+ ### Add data to the database
+
+ Once your Amazon Neptune cluster and database are set up, the next step is to load data into it. This data will be accessed by the AWS Lambda function created as part of this guide.
+
+ Amazon Neptune supports multiple data loading methods, including bulk loading from Amazon S3, Gremlin and SPARQL queries, and integration with AWS Database Migration Service.
+
+ To efficiently populate your Neptune database, use the Neptune bulk loader, which imports data stored in Amazon S3 using formats such as CSV, RDF, or Turtle.
+ For information on how to add data to the Amazon Neptune database, see [Loading Data into a Neptune DB Instance](https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-data.html).
+
+ ## Create the Lambda Execution IAM Role
+
+ ### Create trust policy JSON file
+
+ You need to create the trust policy used for this IAM role. Name the file **trust-policy-lambda.json**.
+
+ ```json
+ {
+   "Version": "2012-10-17",
+   "Statement": [
+     {
+       "Effect": "Allow",
+       "Principal": { "Service": "lambda.amazonaws.com" },
+       "Action": "sts:AssumeRole"
+     }
+   ]
+ }
+
+ ```
+
+ ### Create the lambda-execution-role role
+
+ You can create the **lambda-execution-role** role by using this CLI command.
+
+ ```bash
+ aws iam create-role \
+   --role-name lambda-execution-role \
+   --assume-role-policy-document file://trust-policy-lambda.json
+ ```
+ ### Attach the required managed policies
+
+ Run each of the following AWS CLI commands to attach the necessary managed policies to the Lambda execution role:
+
+ ```bash
+ aws iam attach-role-policy \
+   --role-name lambda-execution-role \
+   --policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess
+
+ aws iam attach-role-policy \
+   --role-name lambda-execution-role \
+   --policy-arn arn:aws:iam::aws:policy/AWSNeptuneFullAccess
+
+ aws iam attach-role-policy \
+   --role-name lambda-execution-role \
+   --policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole
+
+ aws iam attach-role-policy \
+   --role-name lambda-execution-role \
+   --policy-arn arn:aws:iam::aws:policy/CloudWatchLogsFullAccess
+
+ ```
+
+
+ ## Create an IntelliJ project
+
+ 1. In the IntelliJ IDE, choose **File**, **New**, **Project**.
+
+ 2. In the **New Project** dialog box, choose **Maven**, and then choose **Next**.
+
+ 3. For **GroupId**, enter **org.example**.
+
+ 4. For **ArtifactId**, enter **NeptuneLambda**.
+
+ 5. Choose **Next**.
+
+ 6. Choose **Finish**.
+
+ ## Add the POM dependencies to your project
+
+ At this point, you have a new project named **NeptuneLambda**. Make sure that your project's **pom.xml** file looks like the POM file in this GitHub repository.
+
+ ## Create a Lambda function by using the Lambda runtime Java API
+
+ Use the Lambda runtime Java API to create the Java class that defines the Lambda function. In this example, there is one Java class for the Lambda function named **NeptuneLambdaHandler**.
+
+
+ ### NeptuneLambdaHandler class
+
+ This Java code represents the **NeptuneLambdaHandler** class. The class uses the Neptune data API client to query data from the Neptune graph database.
+ +```java +package org.example; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.LambdaLogger; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryResponse; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.net.URI; +import java.time.Duration; +import java.util.Map; + +public class NeptuneLambdaHandler implements RequestHandler, String> { + + @Override + public String handleRequest(Map event, Context context) { + LambdaLogger logger = context.getLogger(); + + String NEPTUNE_ENDPOINT = ":8182"; + + NeptunedataClient neptunedataClient = NeptunedataClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(NEPTUNE_ENDPOINT)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + + // Execute Gremlin Query + logger.log("Executing Gremlin PROFILE query...\n"); + + ExecuteGremlinQueryRequest queryRequest = ExecuteGremlinQueryRequest.builder() + .gremlinQuery("g.V().hasLabel('person').values('name')") + .build(); + + ExecuteGremlinQueryResponse response = neptunedataClient.executeGremlinQuery(queryRequest); + + // Log full response as JSON + logger.log("Full Response:\n"); + try { + ObjectMapper mapper = new ObjectMapper(); + String jsonResponse = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(response); + logger.log(jsonResponse + "\n"); + } catch (Exception e) { + logger.log("Failed to serialize response: " + e.getMessage() + "\n"); + } + + // Log result specifically + if (response.result() != null) { + logger.log("Query Result:\n" + response.result().toString() + "\n"); + } else { + logger.log("No result returned from the query.\n"); + } + + return "Done"; + } +} + +``` + +**Note**: Make sure that you assign your **NEPTUNE_ENDPOINT** with the Neptune endpoint. You can get this value by running the Neptune Basics scenario located in the code lib. + +## Package the project that contains the Lambda functions + +Package up the project into a .jar (JAR) file by using the following Maven command. + + mvn clean package shade:shade + +This creates a shaded JAR file that is located in the **target** folder (which is a child folder of the project folder). + +**Note**: The **maven-shade-plugin** is used in the project’s POM file. This plugin is responsible for creating a .jar file that contains the required dependencies. If you attempt to package up the project without this plugin, the required dependences are not included in the .jar file and you will encounter a **ClassNotFoundException**. + +## Deploy the Lambda function + +You can deploy the Lambda function using the AWS CLI. Be sure to specify the correct VPC subnets and security group associated with your Neptune database. These values can be retrieved by running the Neptune Basics Scenario located in the AWS Code Library. 
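If you prefer to look up these values programmatically instead of rerunning the scenario, a sketch along the following lines prints them with the synchronous `NeptuneClient`. The `VpcConfigLookup` class name and the Region are assumptions, the cluster identifier matches the default used by the Basics scenario code, and the `describeDBSubnetGroups` call assumes the RDS-style subnet group API that Neptune exposes.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.neptune.NeptuneClient;
import software.amazon.awssdk.services.neptune.model.DBCluster;
import software.amazon.awssdk.services.neptune.model.DescribeDbClustersRequest;
import software.amazon.awssdk.services.neptune.model.DescribeDbSubnetGroupsRequest;

public class VpcConfigLookup {
    public static void main(String[] args) {
        String clusterId = "neptuneCluster65"; // Cluster created by the Basics scenario (assumption).

        try (NeptuneClient neptune = NeptuneClient.builder().region(Region.US_EAST_1).build()) {
            DBCluster cluster = neptune.describeDBClusters(DescribeDbClustersRequest.builder()
                            .dbClusterIdentifier(clusterId)
                            .build())
                    .dbClusters().get(0);

            // Security group IDs for --vpc-config SecurityGroupIds=...
            cluster.vpcSecurityGroups().forEach(sg ->
                    System.out.println("Security group: " + sg.vpcSecurityGroupId()));

            // Subnet IDs for --vpc-config SubnetIds=...
            neptune.describeDBSubnetGroups(DescribeDbSubnetGroupsRequest.builder()
                            .dbSubnetGroupName(cluster.dbSubnetGroup())
                            .build())
                    .dbSubnetGroups().get(0).subnets()
                    .forEach(subnet -> System.out.println("Subnet: " + subnet.subnetIdentifier()));
        }
    }
}
```

Copy the printed subnet and security group IDs into the `--vpc-config` option shown next.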
+ +The following command creates a Lambda function configured to run inside your VPC: + + +```bash +aws lambda create-function \ + --function-name NeptuneLoader \ + --runtime java21 \ + --role arn:aws:iam::123456789012:role/lambda-execution-role \ + --handler org.example.NeptuneLambdaHandler::handleRequest \ + --timeout 900 \ + --memory-size 1024 \ + --zip-file fileb://target/my-lambda-jar-with-dependencies.jar \ + --vpc-config SubnetIds=subnet-abcdxxxx,subnet-xyz9xxxx,SecurityGroupIds=sg-abc1xxxx + +``` +You're not required to explicitly specify the VPC ID in the **create-function** command. Instead, you specify the subnets and security groups, which together imply the VPC. + +Ensure thay you specify the correct values such as the IAM role and the proper Lambda handler. + +### Configure Security Group rules + +To enable communication between your Lambda function and the Neptune database, you must configure the security +group rules properly. You must allow inbound traffic on port 8182 from the Lambda function's security group. +Use the following CLI command. + +``` bash + +aws ec2 authorize-security-group-ingress \ + --group-id \ + --protocol tcp \ + --port 8182 \ + --source-group \ + --description "Allow Lambda SG access to Neptune on port 8182" + +``` +In addition, allow outbound traffic on port 8182 to the Neptune DB (by default, all outbound traffic is allowed — verify if restricted). + +Use the following CLI command. + +``` bash +aws ec2 authorize-security-group-egress \ + --group-id \ + --protocol tcp \ + --port 8182 \ + --destination-group \ + --description "Allow Lambda to send traffic to Neptune on port 8182" +``` + +### Invoke your Lambda function + +You can invoke the Lambda function using this CLI command. + +```bash +aws lambda invoke --function-name NeptuneLoader output. +``` + +You will see the following command line message. +```json + { + "StatusCode": 200, + "ExecutedVersion": "$LATEST" + } +``` + +Check the output.log for immediate output, but your logs will be detailed in CloudWatch. + +### View CloudWatch Logs + +After invoking your Lambda function, you can view the logs generated by the function in Amazon CloudWatch. Use the AWS CLI commands below to inspect the log groups, streams, and log events for your NeptuneLoader function: + +#### Find the Log Group + +```bash +aws logs describe-log-groups | grep NeptuneLoader +``` + +#### List Log Streams in the Log Group + +```bash +aws logs describe-log-streams \ + --log-group-name /aws/lambda/NeptuneLoader \ + --order-by LastEventTime \ + --descending +``` + +This lists the available log streams sorted by the most recent activity. + +#### View Log Events from a Specific Stream + +Once you identify a logStreamName from the previous step, use the following command to fetch log events: + +```bash +aws logs get-log-events \ + --log-group-name /aws/lambda/NeptuneLoader \ + --log-stream-name +``` + +Replace with the actual stream name returned in the previous command. + +### Next steps +Congratulations, you have created a Lambda function that queries Neptune data. As stated at the beginning of this tutorial, be sure to delete all of the resources that you created during this tutorial so that you won't be charged. + +For more AWS multiservice examples, see +[usecases](https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/javav2/usecases). 
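One optional cleanup note: when you delete the resources created in this tutorial, remember to remove the Lambda function as well, either with `aws lambda delete-function --function-name NeptuneLoader` or from Java. The following is a minimal, hedged sketch using the Lambda client; it only deletes the function and does not touch the Neptune cluster, subnet group, or security group rules. The `DeleteNeptuneLoader` class name and the Region are assumptions.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.lambda.LambdaClient;
import software.amazon.awssdk.services.lambda.model.DeleteFunctionRequest;
import software.amazon.awssdk.services.lambda.model.ResourceNotFoundException;

public class DeleteNeptuneLoader {
    public static void main(String[] args) {
        try (LambdaClient lambda = LambdaClient.builder().region(Region.US_EAST_1).build()) {
            try {
                // Delete the function created earlier in this tutorial.
                lambda.deleteFunction(DeleteFunctionRequest.builder()
                        .functionName("NeptuneLoader")
                        .build());
                System.out.println("Deleted Lambda function NeptuneLoader.");
            } catch (ResourceNotFoundException e) {
                // The function was already deleted or never created.
                System.out.println("Function NeptuneLoader was not found.");
            }
        }
    }
}
```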
+ + diff --git a/javav2/usecases/creating_neptune_lambda/pom.xml b/javav2/usecases/creating_neptune_lambda/pom.xml new file mode 100644 index 00000000000..580fece6b80 --- /dev/null +++ b/javav2/usecases/creating_neptune_lambda/pom.xml @@ -0,0 +1,113 @@ + + + 4.0.0 + + org.example + NeotuneLambda + 1.0-SNAPSHOT + + + UTF-8 + 21 + 21 + 21 + + + + + software.amazon.awssdk + bom + 2.31.8 + pom + import + + + org.apache.logging.log4j + log4j-bom + 2.23.1 + pom + import + + + + + + org.junit.jupiter + junit-jupiter + 5.11.4 + test + + + software.amazon.awssdk + neptune + + + software.amazon.awssdk + neptunedata + + + software.amazon.awssdk + neptunegraph + + + software.amazon.awssdk + apache-client + 2.25.38 + + + com.google.code.gson + gson + 2.10.1 + + + com.fasterxml.jackson.core + jackson-databind + 2.17.0 + + + software.amazon.awssdk + ssooidc + + + software.amazon.awssdk + sso + + + software.amazon.awssdk + iam-policy-builder + + + software.amazon.awssdk + s3 + + + org.apache.tinkerpop + gremlin-driver + 3.6.4 + + + org.apache.logging.log4j + log4j-core + + + com.amazonaws + aws-lambda-java-core + 1.2.1 + + + org.slf4j + slf4j-api + 2.0.13 + + + org.apache.logging.log4j + log4j-slf4j2-impl + + + org.apache.logging.log4j + log4j-1.2-api + + + diff --git a/javav2/usecases/creating_neptune_lambda/src/main/java/org/example/NeptuneLambdaHandler.java b/javav2/usecases/creating_neptune_lambda/src/main/java/org/example/NeptuneLambdaHandler.java new file mode 100644 index 00000000000..fe861e21ff1 --- /dev/null +++ b/javav2/usecases/creating_neptune_lambda/src/main/java/org/example/NeptuneLambdaHandler.java @@ -0,0 +1,67 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package org.example; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.LambdaLogger; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.neptunedata.NeptunedataClient; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryRequest; +import software.amazon.awssdk.services.neptunedata.model.ExecuteGremlinQueryResponse; +import com.fasterxml.jackson.databind.ObjectMapper; +import java.net.URI; +import java.time.Duration; +import java.util.Map; + +public class NeptuneLambdaHandler implements RequestHandler, String> { + + @Override + public String handleRequest(Map event, Context context) { + LambdaLogger logger = context.getLogger(); + + String NEPTUNE_ENDPOINT = "https://neptunecluster65.cluster-ro-csf1if1wwrox.us-east-1.neptune.amazonaws.com:8182"; + + NeptunedataClient neptunedataClient = NeptunedataClient.builder() + .region(Region.US_EAST_1) + .endpointOverride(URI.create(NEPTUNE_ENDPOINT)) + .httpClientBuilder(ApacheHttpClient.builder() + .connectionTimeout(Duration.ofSeconds(10)) + .socketTimeout(Duration.ofSeconds(30))) + .overrideConfiguration(ClientOverrideConfiguration.builder() + .apiCallAttemptTimeout(Duration.ofSeconds(30)) + .build()) + .build(); + + // Execute Gremlin Query + logger.log("Executing Gremlin PROFILE query...\n"); + + ExecuteGremlinQueryRequest queryRequest = ExecuteGremlinQueryRequest.builder() + .gremlinQuery("g.V().hasLabel('person').values('name')") + .build(); + + ExecuteGremlinQueryResponse response = 
neptunedataClient.executeGremlinQuery(queryRequest); + + // Log full response as JSON + logger.log("Full Response:\n"); + try { + ObjectMapper mapper = new ObjectMapper(); + String jsonResponse = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(response); + logger.log(jsonResponse + "\n"); + } catch (Exception e) { + logger.log("Failed to serialize response: " + e.getMessage() + "\n"); + } + + // Log result specifically + if (response.result() != null) { + logger.log("Query Result:\n" + response.result().toString() + "\n"); + } else { + logger.log("No result returned from the query.\n"); + } + + return "Done"; + } +} diff --git a/scenarios/basics/neptune/README.md b/scenarios/basics/neptune/README.md new file mode 100644 index 00000000000..35e834aff99 --- /dev/null +++ b/scenarios/basics/neptune/README.md @@ -0,0 +1,36 @@ +## Overview +This Amazon Neptune basic scenario demonstrates how to interact with Amazon Neptune using an AWS SDK. The scenario covers various operations such as creating a cluster, creating an instance, starting and stopping the cluster, and so on. + +## Key Operations + +1. **Create a Neptune DB Subnet Group**: + - Creates a Neptune DB Subnet Group by invoking `createDBSubnetGroup`. + +2. **Create a Neptune Cluster**: + - Description: Creates a Neptune Cluster by invoking `createDBCluster`. + +3. **Create a Neptune DB Instance**: + - Description: Creates a Neptune DB Instance by invoking `createDBInstance`. + +4. **Check the status of the Neptune DB Instance**: + - Description: Check the status of the DB instance by invoking `describeDBInstances`. Poll the instance until it reaches an `availbale`state. + +**Note** See the [Engineering specification](SPECIFICATION.md) for a full listing of operations. + +## Resources + +This Basics scenario does not require any additional AWS resources. + +## Implementations + +This scenario example will be implemented in the following languages: + +- Java +- Python +- Kotlin + +## Additional Reading + +- [Amazon Neptune Documentation](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) + +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 diff --git a/scenarios/basics/neptune/SPECIFICATION.md b/scenarios/basics/neptune/SPECIFICATION.md new file mode 100644 index 00000000000..7575ebc8be1 --- /dev/null +++ b/scenarios/basics/neptune/SPECIFICATION.md @@ -0,0 +1,289 @@ +# Amazon Neptune Service Scenario Specification + +## Overview +This SDK Basics scenario demonstrates how to interact with Amazon Neptune using an AWS SDK. +It demonstrates various tasks such as creating a Neptune DB Subnet Group, creating a Neptune Cluster, creating a Neptune DB Instance, and so on. + +Finally this scenario demonstrates how to clean up resources. Its purpose is to demonstrate how to get up and running with Amazon Neptune and an AWS SDK. + +## Is using NeptuneClient worth while (Amazon Bedrock results) + +Here is more context on when it's a good idea to use the `NeptuneAsyncClient`: + +1. **Dynamic Resource Provisioning**: The `NeptuneAsyncClient` can be particularly useful when you need to dynamically create, update, or delete Neptune resources as part of your application's functionality. This could be useful in use cases such as: + + - **Multi-tenant Applications**: If you're building a SaaS application that needs to provision Neptune instances on-demand, the `NeptuneAsyncClient` can help you automate this process programmatically. 
+   - **Ephemeral Environments**: When you need to spin up and tear down Neptune resources as part of your CI/CD pipeline or within Lambda environments, the `NeptuneAsyncClient` can streamline this process.
+   - **Scaling and Elasticity**: If your application needs to scale Neptune resources up or down based on demand, the `NeptuneAsyncClient` can help you manage these changes dynamically.
+
+ 2. **Integrations and Workflow Automation**: The `NeptuneAsyncClient` can be beneficial when you need to integrate Neptune provisioning and management into larger, automated workflows. For example:
+
+   - **DevOps Tooling**: You can use the `NeptuneAsyncClient` as part of your infrastructure-as-code (IaC) tooling, such as building custom scripts that can provision Neptune resources on-demand.
+   - **Serverless Architectures**: When deploying serverless applications that rely on Neptune, the `NeptuneAsyncClient` can help you manage the Neptune components of your serverless stack.
+
+
+ 3. **Rapid Prototyping and Experimentation**: The programmatic nature of the `NeptuneAsyncClient` can be beneficial when you need to quickly set up and tear down Neptune resources for prototyping, testing, or experimentation purposes. This can be particularly useful for:
+
+   - **Proof-of-Concepts**: When validating ideas or testing new features that require a Neptune database, the `NeptuneAsyncClient` can help you provision the necessary resources with minimal overhead.
+   - **Performance Testing**: If you need to stress-test your Neptune-powered application, the `NeptuneAsyncClient` can help you programmatically create and manage the required test environments.
+   - **Data Migrations**: When migrating data between Neptune instances or across AWS Regions, the `NeptuneAsyncClient` can streamline the process of provisioning the necessary resources.
+
+ The key advantage of the `NeptuneAsyncClient` is its ability to provide fine-grained, programmatic control over Neptune resources. This can be particularly valuable in dynamic, automated, or rapidly changing environments where the flexibility and programmability of the `NeptuneAsyncClient` can help streamline your application's Neptune-related infrastructure management.
+
+ ### Use Case Recommendation
+
+ - Infrastructure as code (IaC): Prefer CDK, CloudFormation, or Terraform
+ - Dynamic provisioning in an application: Use the `NeptuneAsyncClient`
+ - Internal tooling or automation: Use the `NeptuneAsyncClient`
+ - Manual ad hoc cluster setup: Use the CLI or an SDK (sync/async)
+
+ ## Resources
+ This Basics scenario does not require any additional AWS resources.
+
+ ## Hello Amazon Neptune
+ This program is intended for users not familiar with Amazon Neptune to easily get up and running. The program invokes `describeDBClustersPaginator` to page through the DB clusters in the account.
+
+ ## Basics Scenario Program Flow
+ The Amazon Neptune Basics scenario executes the following operations.
+
+ 1. **Create a Neptune DB Subnet Group**:
+    - Description: Creates a Neptune DB Subnet Group by invoking `createDBSubnetGroup`.
+    - Exception Handling: Check to see if a `ServiceQuotaExceededException` is thrown.
+      If so, display the message and end the program.
+
+ 2. **Create a Neptune Cluster**:
+    - Description: Creates a Neptune Cluster by invoking `createDBCluster`.
+    - Exception Handling: Check to see if a `ServiceQuotaExceededException` is thrown. If so, display the message and end the program.
+
+ 3. **Create a Neptune DB Instance**:
+    - Description: Creates a Neptune DB Instance by invoking `createDBInstance`.
+ - Exception Handling: Check to see if an `ServiceQuotaExceededException` is thrown. If so, display the message and end the program. + +4. **Check the status of the Neptune DB Instance**: + - Description: Check the status of the DB instance by invoking `describeDBInstances`. Poll the instance until it reaches an `availbale`state. + - Exception Handling: This operatioin handles a `CompletionException`. If thrown, display the message and end the program. + +5. **Show Neptune Cluster details**: + - Description: Shows the details of the cluster by invoking `describeDBClusters`. + - Exception Handling: Check to see if a `ResourceNotFoundException` is thrown. If so, display the message and end the program. + +6. **Stop the Cluster**: + - Description: Stop the cluster by invoking `stopDBCluster`. Poll the cluster until it reaches a `stopped`state. + - Exception Handling: Check to see if a `ResourceNotFoundException` is thrown. If so, display the message and end the program. + +7. **Start the cluster**: + - Description: Start the cluster by invoking `startBCluster`. Poll the cluster until it reaches an `available`state. + - Exception Handling: Check to see if a `ResourceNotFoundException` is thrown. If so, display the message and end the program. + + +8. **Delete the Neptune Assets**: + - Description: Delete the various resources. + - Exception Handling: Check to see if an `ResourceNotFoundException` is thrown. If so, display the message and end the program. + +### Program execution +The following shows the output of the Amazon Neptune Basics scenario. + +``` + Amazon Neptune is a fully managed graph + database service by AWS, designed specifically + for handling complex relationships and connected + datasets at scale. It supports two popular graph models: + property graphs (via openCypher and Gremlin) and RDF + graphs (via SPARQL). This makes Neptune ideal for + use cases such as knowledge graphs, fraud detection, + social networking, recommendation engines, and + network management, where relationships between + entities are central to the data. + +Being fully managed, Neptune handles database +provisioning, patching, backups, and replication, +while also offering high availability and durability +within AWS's infrastructure. + +For developers, programming with Neptune allows +for building intelligent, relationship-aware +applications that go beyond traditional tabular +databases. Developers can use the AWS SDK for Java +V2 to automate infrastructure operations +(via NeptuneClient). + +Let's get started... + + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +1. Create a Neptune DB Subnet Group +The Neptune DB subnet group is used when launching a Neptune cluster + +Enter 'c' followed by to continue: +c +Continuing with the program... + +Subnet group created: neptunesubnetgroup56 + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +2. Create a Neptune Cluster +A Neptune Cluster allows you to store and query highly connected datasets with low latency. + +Enter 'c' followed by to continue: +c +Continuing with the program... + +DB Cluster created: neptunecluster56 + +Enter 'c' followed by to continue: +c +Continuing with the program... 
+ +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +3. Create a Neptune DB Instance +In this step, we add a new database instance to the Neptune cluster + +Enter 'c' followed by to continue: +c +Continuing with the program... + +Created Neptune DB Instance: neptunedb56 + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +4. Check the status of the Neptune DB Instance +In this step, we will wait until the DB instance +becomes available. This may take around 10 minutes. + + +Enter 'c' followed by to continue: +c +Continuing with the program... + + Neptune instance reached desired status 'available' after 10 mins, 29 secs. + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +5.Show Neptune Cluster details + +Enter 'c' followed by to continue: +c +Continuing with the program... + +Cluster Identifier: neptunecluster56 +Status: available +Engine: neptune +Engine Version: 1.4.5.0 +Endpoint: neptunecluster56.cluster-csf1if1wwrox.us-east-1.neptune.amazonaws.com +Reader Endpoint: neptunecluster56.cluster-ro-csf1if1wwrox.us-east-1.neptune.amazonaws.com +Availability Zones: [us-east-1f, us-east-1c, us-east-1a] +Subnet Group: default +VPC Security Groups: + - sg-dd2e43f0 +Storage Encrypted: false +IAM DB Auth Enabled: false +Backup Retention Period: 1 days +Preferred Backup Window: 04:54-05:24 +Preferred Maintenance Window: sat:03:37-sat:04:07 +------ + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +6. Stop the Amazon Neptune cluster +Once stopped, this step polls the status +until the cluster is in a stopped state. + + +Enter 'c' followed by to continue: +c +Continuing with the program... + +DB Cluster Stopped + Waiting for cluster 'neptunecluster56' to reach status 'stopped'... + Neptune cluster reached desired status 'stopped' after 12 mins, 10 secs. + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +7. Start the Amazon Neptune cluster +Once started, this step polls the clusters +status until it's in an available state. +We will also poll the instance status. + + +Enter 'c' followed by to continue: +c +Continuing with the program... + + DB Cluster starting... + Waiting for cluster 'neptunecluster56' to reach status 'available'... + Neptune cluster reached desired status 'available' after 10 mins, 28 secs. + Neptune instance reached desired status 'available' after 0 secs. +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +8. Delete the Neptune Assets +Would you like to delete the Neptune Assets? (y/n) +y +You selected to delete the Neptune assets. 
+ Deleting DB Instance: neptuneDB56 + Instance neptuneDB56 deleted after 750s + Deleting DB Cluster: neptuneCluster56 + Deleting Subnet Group: neptuneSubnetGroup56 + Neptune resources deleted successfully. + +Enter 'c' followed by to continue: +c +Continuing with the program... + +-------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- +Thank you for checking out the Amazon Neptune Service Use demo. We hope you +learned something new, or got some inspiration for your own apps today. +For more AWS code examples, have a look at: +https://docs.aws.amazon.com/code-library/latest/ug/what-is-code-library.html + +-------------------------------------------------------------------------------- +``` + +## SOS Tags + +The following table describes the metadata used in this Basics Scenario. The metadata file is `neptune_metadata.yaml`. + + +| action | metadata key | +|------------------------|------------------------------------- | +|`createDBSubnetGroup` | neptune_CreateDBSubnetGroup | +|`createDBCluster` | neptune_CreateDBCluster | +|`createDBInstance` | neptune_CreateDBInstance | +|`describeDBInstances ` | neptune_DescribeDBInstances | +|`describeDBClusters` | neptune_DescribeDBClusters | +| `stopDBCluster` | neptune_StopDBCluster | +|`startDBCluster ` | neptune_StartDBCluster | +|`deleteDBInstance ` | neptune_DeleteDBInstance | +| `deleteDBCluster` | neptune_DeleteDBCluster | +| `deleteDBSubnetGroup `| neptune_DeleteDBSubnetGroup | +| `scenario` | neptune_Scenario | +| `hello` | neptune_Hello | + + +
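As a companion to the Hello Amazon Neptune section above, the Hello flow can be sketched in Java roughly as follows. This is an illustrative sketch only; the `HelloNeptuneSketch` class name and the `US_EAST_1` Region are assumptions.

```java
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.neptune.NeptuneClient;

public class HelloNeptuneSketch {
    public static void main(String[] args) {
        try (NeptuneClient neptune = NeptuneClient.builder().region(Region.US_EAST_1).build()) {
            // Page through all Neptune DB clusters in the account and print basic details.
            neptune.describeDBClustersPaginator().stream()
                    .flatMap(page -> page.dbClusters().stream())
                    .forEach(cluster -> System.out.println(
                            cluster.dbClusterIdentifier() + " : " + cluster.status()));
        }
    }
}
```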