diff --git a/.gitignore b/.gitignore
index 0c02a85..99876e5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,10 @@
 target
 dependency-reduced-pom.xml
 *~
-.*~
\ No newline at end of file
+.*~
+.cwlogs
+.next-shard-iterator
+.DS_Store
+venv*
+.terraform*
+terraform.tfstate*
diff --git a/Dockerfile b/Dockerfile
index 08e6d8f..996b87c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,14 +1,14 @@
-# The parent Flink image (flink:1.13.2-scala_2.12-java11) only contains the JRE (openjdk:11-jre), and it is missing key
+# The parent Flink image (flink:1.18.1-java11) only contains a JRE, and it is missing key
 # diagnostic tools. This multistage build will overwrite the JRE with the JDK from openjdk:11
 # See https://docs.docker.com/develop/develop-images/multistage-build/
-FROM openjdk:11 as jdk_image
-FROM flink:1.16.2-java11
+FROM --platform=linux/amd64 openjdk:11 AS jdk_image
+FROM --platform=linux/amd64 flink:1.18.1-java11
 
 # Copy the JDK from the jdk_image
-COPY --from=jdk_image /usr/local/openjdk-11 /usr/local/openjdk-11
+COPY --from=jdk_image /usr/local/openjdk-11 /opt/java/openjdk/
 
-RUN sed -i -e 's/^.*networkaddress.cache.ttl=.*$/networkaddress.cache.ttl=30/g' /usr/local/openjdk-11/conf/security/java.security
-RUN sed -i -e 's/^.*networkaddress.cache.negative.ttl=.*$/networkaddress.cache.negative.ttl=10/g' /usr/local/openjdk-11/conf/security/java.security
+RUN sed -i -e 's/^.*networkaddress.cache.ttl=.*$/networkaddress.cache.ttl=30/g' /opt/java/openjdk/conf/security/java.security
+RUN sed -i -e 's/^.*networkaddress.cache.negative.ttl=.*$/networkaddress.cache.negative.ttl=10/g' /opt/java/openjdk/conf/security/java.security
 
 # The 2019 AWS rds root cert
 ADD rds-ca-2019-root.pem /etc/rds-ca-2019-root.pem
@@ -43,7 +43,7 @@ RUN mkdir -p $FLINK_JOB_DIR
 COPY target/my-stateful-functions-embedded-java-3.3.0.jar ${FLINK_JOB_DIR}/flink-job.jar
 RUN chown -R flink:flink ${FLINK_JOB_DIR}/
 
-ENV PLUGIN_NAME flink-s3-fs-hadoop-1.16.2
+ENV PLUGIN_NAME flink-s3-fs-hadoop-1.18.1
 RUN mkdir -p "${FLINK_HOME}/plugins/${PLUGIN_NAME}"
 RUN ln -fs "${FLINK_HOME}/opt/${PLUGIN_NAME}.jar" "${FLINK_HOME}/plugins/${PLUGIN_NAME}"
diff --git a/README.md b/README.md
index 862f22c..be02003 100644
--- a/README.md
+++ b/README.md
@@ -11,9 +11,9 @@ The purpose of this project is two-fold.
    [Caliper](https://www.imsglobal.org/spec/caliper/v1p2) events.
    * This project egresses results as events to a separate stream, whereas at Imagine Learning we mostly send our
      results directly to OpenSearch and occasionally write events back to the ingress stream.
- 2. It will serve as the basis for an evaluation of Stateful Functions running on
+ 2. It serves as the basis for an evaluation of Stateful Functions running on
    [AWS Managed Flink](https://docs.aws.amazon.com/managed-flink/). At the time of
-   this writing Imagine Learning runs stateful functions on self-managed Kubernetes clusters, but we are looking to
+   this writing Imagine Learning runs Flink Stateful Functions on self-managed Kubernetes clusters, but we are looking to
    see if AWS Managed Flink is a viable alternative.
 
 
@@ -23,7 +23,8 @@ This project demonstrates stateful functions under test in various ways:
 * run-time execution in standalone job mode via docker compose
 
-The project implements embedded functions (functions that execute in the Flink taskmanagers). Remote functions are future work.
+The project implements embedded functions (functions that execute in the Flink TaskManagers). Remote functions are
+future work.
 
 This is an opinionated project. It uses...
 * Spring Framework for dependency injection
@@ -38,7 +39,7 @@ This is an opinionated project. It uses...
-Each forwarder is small piece of code that routes one or more specific event types to a stateful function. To start
+Each forwarder is a small piece of code that routes one or more specific event types to a stateful function. To start
 routing a new event type, just implement another Forwarder.
 
-## What this Stateful Functions appication does
+## What this Stateful Functions application does
 Example events and functions are provided which demonstrate notifying a shopping cart service of product price and
 availability changes for items in users' carts. The project assumes the existence of upstream microservices that send
 Product events (name,price,availability) and
@@ -91,22 +92,24 @@ Users running on Apple silicon should ensure that the file ~/.m2/settings.xml ex
 ```
 
-To compile the code and run the tests using the included Maven wrapper script...
+
+To compile the code and run the tests using the included Maven wrapper script, first see below about
+building and installing Apache Flink Stateful Functions compatible with Flink 1.18, then do this:
 ```
 ./mvnw test
 ```
 
-## Running the project via Docker Compose
+## Running the project locally via Docker Compose
 
 Follow the instructions below to run the project via Docker Compose. Note that Kinesis support is provided by a
 [localstack](https://www.localstack.cloud/) container.
 
 The demo works using three docker compose "profiles" (phases).
-1. In the first phase, the flink cluster running our stateful function application is started, 
+1. In the first phase, the Flink cluster running our stateful function application is started,
    along with localstack, and an aws-cli container that creates the ingress and egress Kinesis streams.
-2. The second phase runs an aws-cli container to send events to the ingress stream. The events 
+2. The second phase runs an aws-cli container to send events to the ingress stream. The events
   sent are from [product-cart-integration-test-events.jsonl](./src/test/resources/product-cart-integration-test-events.jsonl)
-3. The third phase runs an aws-cli container to fetch the events from the egress stream and output them to the console. 
+3. The third phase runs an aws-cli container to fetch the events from the egress stream and output them to the console.
 
 ```shell
 # Build this project and create the jar file
 ./mvnw package
@@ -131,3 +134,257 @@ docker compose --profile get-egress-events up
 docker compose --profile all down
 ```
+
+## Running the project via AWS Managed Flink
+
+### Version compatibility between AWS Managed Flink and Stateful Functions
+
+The latest release of Apache Flink Stateful Functions is 3.3, but it's compiled and built
+to run with Flink 1.16.2. AWS Managed Flink currently supports Flink versions 1.15 and 1.18, so the first
+step towards running via AWS Managed Flink is to create a version of the Stateful Functions library
+compatible with Flink 1.18. The required changes are provided here:
+https://github.com/kellinwood/flink-statefun/pull/1/files.
+Clone that repo, check out the `release-3.3-1.18`
+branch, and build/install it locally via `mvn install`, for example:
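+
+A sketch of those steps (the clone URL is inferred from the pull request link above, and
+`-DskipTests` is an optional shortcut):
+```shell
+git clone https://github.com/kellinwood/flink-statefun.git
+cd flink-statefun
+git checkout release-3.3-1.18
+mvn install -DskipTests   # drop -DskipTests to run the full statefun test suite
+```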
+
+### Build and package this project
+```shell
+./mvnw package
+```
+
+The demo can be provisioned in AWS in three ways: via CloudFormation, Terraform, or Crossplane.
+
+### Provisioning via AWS CloudFormation
+
+The templates and scripts used for provisioning the AWS resources via CloudFormation are in the
+[aws-cloudformation](./aws-cloudformation) directory.
+```
+cd aws-cloudformation
+```
+
+#### Create an S3 bucket and upload this project's JAR file
+
+To create the bucket, create a CloudFormation stack named `flink-cf-demo-bucket` as defined [here](./aws-cloudformation/flink-cf-demo-bucket-stack.yaml),
+and after that finishes, use the AWS CLI to upload the jar file:
+
+```shell
+export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://flink-cf-demo-bucket-${AWS_ACCOUNT_ID}/
+```
+
+#### Create the Kinesis streams, Managed Flink application, and related AWS resources
+
+Create a CloudFormation stack named `flink-cf-demo` as defined by the CloudFormation templates [here](./aws-cloudformation/flink-cf-demo-stack.yaml).
+This stack includes a custom resource lambda that programmatically configures logging when the Flink application is created,
+and transitions the application from the Ready state to the Running state.
+
+#### Monitor the CloudWatch logging output
+
+The following script shows all the log entries from the start of application launch, then
+waits for new entries to arrive and displays them too. The script will resume from where it
+left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory.
+```shell
+./demo-tail-logs.sh
+```
+
+#### Send sample events to the ingress stream
+```shell
+./demo-send-events.sh
+```
+
+#### Get and display the events published to the egress stream
+This script shows all events published to the egress stream since the start of application launch, then
+waits for new entries to arrive and displays them too.
+```shell
+./demo-tail-egress.sh
+```
+#### Cleanup
+Clean up by manually deleting the jar file from the S3 bucket and the ingress Kinesis stream, then delete the
+CloudFormation stacks. The manual deletions are required because CloudFormation will fail to delete a non-empty
+bucket, and will fail to delete the ingress Kinesis stream since Flink adds a fanout consumer to the stream,
+which blocks the deletion attempted by CloudFormation.
+
+### Provisioning via Terraform
+
+Requires installing the [Terraform CLI](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
+
+Steps:
+
+```shell
+cd aws-terraform
+# Configure your AWS profile, set AWS env vars, or run 'aws configure sso', etc.
+terraform init
+terraform apply # When prompted, enter 'yes'
+```
+Immediately after entering 'yes' at the prompt issued by `terraform apply`, switch to another shell/terminal tab and
+upload the application JAR file to the S3 bucket. The upload may fail if the S3 bucket has not been created by
+Terraform yet, so keep trying until it succeeds.
+
+```shell
+export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \
+    s3://flink-tf-demo-bucket-${AWS_ACCOUNT_ID}/
+```
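+
+If you would rather not retry by hand, a small loop like this sketch polls until the copy succeeds
+(it assumes the same bucket name and `AWS_ACCOUNT_ID` as above):
+
+```shell
+until aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar \
+    s3://flink-tf-demo-bucket-${AWS_ACCOUNT_ID}/ 2>/dev/null; do
+  echo "Bucket not created yet; retrying..."
+  sleep 5
+done
+```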
+Wait for the `terraform apply` command to complete.
+
+#### Monitor the CloudWatch logging output
+
+The following script shows all the log entries from the start of application launch, then
+waits for new entries to arrive and displays them too. The script will resume from where it
+left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory.
+```shell
+./demo-tail-logs.sh
+```
+
+#### Send sample events to the ingress stream
+```shell
+./demo-send-events.sh
+```
+
+#### Get and display the events published to the egress stream
+This script shows all events published to the egress stream since the start of application launch, then
+waits for new entries to arrive and displays them too.
+```shell
+./demo-tail-egress.sh
+```
+
+#### Cleanup
+Clean up by manually deleting the jar file from the S3 bucket, `flink-tf-demo-bucket-${AWS_ACCOUNT_ID}`, and the Kinesis
+stream `flink-tf-demo-ingress`, then run the `terraform destroy` command. The manual deletions are required
+because Terraform can't delete a non-empty bucket, and can't delete the ingress stream since Flink adds a fanout
+consumer to the stream, which blocks the deletion attempted by Terraform.
+
+Alternatively, you can run the following commands to clean up the resources:
+```shell
+export AWS_ACCOUNT_ID=516535517513 # Imagine Learning Sandbox account
+aws s3 rm --recursive s3://flink-tf-demo-bucket-${AWS_ACCOUNT_ID}/
+aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-tf-demo-ingress
+terraform destroy # When prompted, enter 'yes'
+```
+
+### Provisioning via Crossplane
+
+#### Prerequisites
+- Docker
+- idpbuilder (https://github.com/cnoe-io/idpbuilder)
+- kubectl
+- jq
+- Go
+
+#### Introduction
+This demo demonstrates that it is possible to provision and run an AWS Managed Flink application via Crossplane. Many
+tasks normally performed via CI/CD must be completed manually as described below. The compositions for S3 buckets and
+Kinesis streams currently use `function-patch-and-transform`, but the Managed Flink composition uses a custom function.
+
+#### Instructions
+
+The files to run the Crossplane demo are in the [aws-crossplane](./aws-crossplane) directory.
+```
+cd aws-crossplane
+```
+
+##### Update the AWS credentials in the local environment
+
+Log in to AWS Identity Center and copy the AWS credential environment variable commands from the Access Keys page.
+
+Paste and execute the AWS environment variable commands.
+
+Optionally, set the AWS_ACCOUNT environment variable to your AWS account number. Setting the account number
+explicitly is not required if it can be determined instead via `aws sts get-caller-identity` (e.g. after
+`aws configure sso` / `aws sso login`).
+
+Finally, run the following script to update the AWS credentials for the local environment:
+```shell
+./local/aws/update_credentials.sh
+```
+
+##### Launch and configure a Kubernetes cluster using the "idpbuilder" tool
+
+Run `./launch-and-config-idp.sh`.
+
+This script will launch a local Kubernetes cluster using `kind`, and configure the cluster with the necessary
+Crossplane providers and resources. It also builds and uploads the Docker image for the Managed Flink composition
+function.
+
+The script takes a few minutes to complete.
+
+##### Provision AWS Managed Flink via Crossplane claims
+
+Provision the S3 bucket and Kinesis streams...
+```
+kubectl apply -f claims/demo-setup-claims.yaml
+```
+
+Wait for the resources to become synchronized and ready by checking the output of the following command:
+```
+kubectl get managed
+```
+The output of `kubectl get managed` will reveal the actual S3 bucket name under `EXTERNAL-NAME`.
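+
+If you prefer to wait in a loop rather than re-running `kubectl get managed` by hand, a sketch like
+this works (it assumes READY is the second column of the output, and that the claims above have
+already been applied):
+
+```shell
+until [ -z "$(kubectl get managed --no-headers 2>/dev/null | awk '$2 != "True"')" ]; do
+  echo "Waiting for managed resources to become ready..."
+  sleep 5
+done
+```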
+
+Return to AWS Identity Center and launch the web console for the account.
+
+Visit the S3 services page. Find the S3 bucket (flink-cp-demo-bucket-*) and upload the following file to the bucket:
+- `../target/my-stateful-functions-embedded-java-3.3.0.jar` (Flink demo application code)
+
+Alternatively, use the AWS CLI to upload the file...
+```
+flink_bucket_name=$(kubectl get managed | grep bucket | awk '{print $4}')
+aws s3 cp ../target/my-stateful-functions-embedded-java-3.3.0.jar s3://${flink_bucket_name}/
+```
+
+##### Provision the Managed Flink application
+
+Apply the following claim to trigger the creation of the Flink application, its role, log group, and log stream.
+Note that at the time of this writing, the Flink application is not configured with the log stream, to work around a bug in
+the Crossplane provider (https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419).
+
+```
+kubectl apply -f claims/managed-flink-claim.yaml
+```
+
+Wait until the Flink application is in the 'Running' state. This may take a few minutes.
+
+#### Monitor the CloudWatch logging output
+
+See the note above re: the logging config. Until the bug is fixed, no log output will be available.
+
+The following script shows all the log entries from the start of application launch, then
+waits for new entries to arrive and displays them too. The script will resume from where it
+left off if shut down via Ctrl-C. To start from scratch, remove the `.cwlogs` directory.
+```shell
+./demo-tail-logs.sh
+```
+
+#### Send sample events to the ingress stream
+```shell
+./demo-send-events.sh
+```
+
+#### Get and display the events published to the egress stream
+This script shows all events published to the egress stream since the start of application launch, then
+waits for new entries to arrive and displays them too.
+```shell
+./demo-tail-egress.sh
+```
+
+#### Cleanup
+
+Manually delete the files in the S3 bucket, and delete the Kinesis stream `flink-cp-demo-ingress` (the Flink application
+adds a fanout consumer to the stream, which blocks any deletion attempted by Crossplane).
+
+```
+aws s3 rm --recursive s3://$(aws s3 ls | grep flink-cp-demo | awk '{print $3}')
+aws kinesis delete-stream --enforce-consumer-deletion --stream-name flink-cp-demo-ingress
+```
+
+Run the following commands to delete the remaining resources:
+```
+kubectl delete -f claims/managed-flink-claim.yaml
+kubectl delete -f claims/demo-setup-claims.yaml
+```
+
+Shut down the local IDP with the command:
+```
+idpbuilder delete
+```
+
diff --git a/aws-cloudformation/demo-send-events.sh b/aws-cloudformation/demo-send-events.sh
new file mode 100755
index 0000000..7699690
--- /dev/null
+++ b/aws-cloudformation/demo-send-events.sh
@@ -0,0 +1,17 @@
+#! /bin/bash
+
+if [ $(uname) = "Darwin" ]; then
+    MD5SUM=md5
+else
+    MD5SUM=md5sum
+fi
+
+stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep FlinkCfDemoIngressStream)
+
+grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do
+    partkey=$(echo $line | $MD5SUM | awk '{print $1}')
+    data=$(echo $line | base64)
+    cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data"
+    echo $cmd
+    eval $cmd
+done
diff --git a/aws-cloudformation/demo-tail-egress.sh b/aws-cloudformation/demo-tail-egress.sh
new file mode 100755
index 0000000..f198bb8
--- /dev/null
+++ b/aws-cloudformation/demo-tail-egress.sh
@@ -0,0 +1,25 @@
+#!
/bin/bash + +set -e + +# Get the events sent to the egress stream +stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep FlinkCfDemoEgressStream) + +get_records_response=$(mktemp) + +shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId) +shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator) +while [ "true" ]; do + aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response + shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator) + record_count=0 + for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do + record_count=$(expr $record_count + 1) + echo $encoded_data | base64 -d | jq . + done + if [ $record_count -eq 0 ]; then + sleep 2 + fi +done + + diff --git a/aws-cloudformation/demo-tail-logs.sh b/aws-cloudformation/demo-tail-logs.sh new file mode 100755 index 0000000..a2009fe --- /dev/null +++ b/aws-cloudformation/demo-tail-logs.sh @@ -0,0 +1,40 @@ +#! /bin/bash + +set -e + +cd $(dirname $0) + +NEXT_TOKEN_ARG= + +CWLOGS_DIR=.cwlogs +mkdir -p $CWLOGS_DIR + +ITERATION=1 + +if [ -f $CWLOGS_DIR/next.token ]; then + NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)" +fi + +while true; do + CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json + aws logs get-log-events \ + --start-from-head \ + $NEXT_TOKEN_ARG \ + --log-group-name FlinkCfDemoLogGroup \ + --log-stream-name FlinkCfDemoLogStream \ + >$CWLOG_FILE + + NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken) + echo $NEXT_TOKEN >$CWLOGS_DIR/next.token + NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN" + EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length') + + if [[ $EVENT_COUNT == 0 ]]; then + sleep 2 + rm $CWLOG_FILE + else + cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log + fi + + ITERATION=$(echo "1 + $ITERATION" | bc) +done diff --git a/aws-cloudformation/flink-cf-demo-bucket-stack.yaml b/aws-cloudformation/flink-cf-demo-bucket-stack.yaml new file mode 100644 index 0000000..9fc4e11 --- /dev/null +++ b/aws-cloudformation/flink-cf-demo-bucket-stack.yaml @@ -0,0 +1,35 @@ +# A CloudFormation stack for the S3 bucket to which the flink stateful functions job JAR file will be uploaded. +# Once this bucket has been created, upload the JAR file, then create the managed-flink-poc stack. +Description: "Bucket and policy to hold statefun JAR file for Managed Flink proof-of-concept. 
Contact: Ken Ellinwood" +Resources: + CodeBucket: + Type: AWS::S3::Bucket + Properties: + BucketName: !Sub ${AWS::StackName}-${AWS::AccountId} + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: aws:kms + KMSMasterKeyID: alias/aws/s3 + CodeBucketBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref CodeBucket + PolicyDocument: + Id: RequireEncryptionInTransit + Version: '2012-10-17' + Statement: + - Principal: '*' + Action: '*' + Effect: Deny + Resource: + - !GetAtt CodeBucket.Arn + - !Sub ${CodeBucket.Arn}/* + Condition: + Bool: + aws:SecureTransport: 'false' +Outputs: + FlinkCfDemoCodeBucketArn: + Value: !GetAtt CodeBucket.Arn + Export: + Name: FlinkCfDemoCodeBucketArn # Exported for use by the stack defined in flink-cf-demo-stack.yaml diff --git a/aws-cloudformation/flink-cf-demo-stack.yaml b/aws-cloudformation/flink-cf-demo-stack.yaml new file mode 100644 index 0000000..2073ed2 --- /dev/null +++ b/aws-cloudformation/flink-cf-demo-stack.yaml @@ -0,0 +1,236 @@ +# A CloudFormation stack containing all resources except for the S3 bucket containing the statefun application JAR. +Description: "Stack to run Managed Flink proof-of-concept. Contact: Ken Ellinwood" +Resources: + FlinkCfDemoIngressStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 1 + StreamEncryption: + EncryptionType: KMS + KeyId: alias/aws/kinesis + StreamModeDetails: + StreamMode: PROVISIONED + FlinkCfDemoEgressStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 1 + StreamEncryption: + EncryptionType: KMS + KeyId: alias/aws/kinesis + StreamModeDetails: + StreamMode: PROVISIONED + FlinkCfDemoLogGroup: + Type: 'AWS::Logs::LogGroup' + Properties: + LogGroupName: FlinkCfDemoLogGroup + RetentionInDays: 7 + UpdateReplacePolicy: Delete + DeletionPolicy: Delete + FlinkCfDemoLogStream: + Type: 'AWS::Logs::LogStream' + Properties: + LogGroupName: + Ref: FlinkCfDemoLogGroup + LogStreamName: FlinkCfDemoLogStream + UpdateReplacePolicy: Delete + DeletionPolicy: Delete + FlinkCfDemoIAMRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - kinesisanalytics.amazonaws.com + Action: sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisFullAccess + - arn:aws:iam::aws:policy/AmazonS3FullAccess + - arn:aws:iam::aws:policy/CloudWatchFullAccess + Path: / + Policies: + - PolicyDocument: + Statement: + - Action: + - 'kinesis:DescribeStream' + - 'kinesis:GetRecords' + - 'kinesis:GetShardIterator' + - 'kinesis:ListShards' + Effect: Allow + Resource: + - 'Fn::GetAtt': + - FlinkCfDemoIngressStream + - Arn + - 'Fn::GetAtt': + - FlinkCfDemoEgressStream + - Arn + Version: '2012-10-17' + PolicyName: AccessKDSPolicy + - PolicyDocument: + Statement: + - Action: + - 'logs:DescribeLogGroups' + - 'logs:DescribeLogStreams' + - 'logs:PutLogEvents' + Effect: Allow + Resource: + 'Fn::GetAtt': + - FlinkCfDemoLogGroup + - Arn + Version: '2012-10-17' + PolicyName: AccessCWLogsPolicy + - PolicyDocument: + Statement: + - Action: 'cloudwatch:PutMetricData' + Effect: Allow + Resource: '*' + Version: '2012-10-17' + PolicyName: AccessCWMetricsPolicy + FlinkCfDemoApplication: + Type: AWS::KinesisAnalyticsV2::Application + Properties: + ApplicationName: !Sub ${AWS::StackName}-application + ApplicationDescription: 'Managed Flink demo application provisioned via CloudFormation' + RuntimeEnvironment: 'FLINK-1_18' + ServiceExecutionRole: !GetAtt 
FlinkCfDemoIAMRole.Arn
+      ApplicationConfiguration:
+        EnvironmentProperties:
+          PropertyGroups:
+            - PropertyGroupId: 'StatefunApplicationProperties'
+              PropertyMap:
+                EVENTS_INGRESS_STREAM_DEFAULT: !Ref FlinkCfDemoIngressStream
+                EVENTS_EGRESS_STREAM_DEFAULT: !Ref FlinkCfDemoEgressStream
+                AWS_REGION: !Ref AWS::Region
+        FlinkApplicationConfiguration:
+          CheckpointConfiguration:
+            ConfigurationType: 'CUSTOM'
+            CheckpointingEnabled: True
+            CheckpointInterval: 60000 # Every minute # Increase this to 300000 in production (every 5 minutes)
+            MinPauseBetweenCheckpoints: 500
+          MonitoringConfiguration:
+            ConfigurationType: 'CUSTOM'
+            MetricsLevel: 'APPLICATION'
+            LogLevel: 'INFO'
+          ParallelismConfiguration:
+            ConfigurationType: 'CUSTOM'
+            Parallelism: 1
+            ParallelismPerKPU: 1
+            AutoScalingEnabled: True
+        ApplicationSnapshotConfiguration:
+          SnapshotsEnabled: True
+        ApplicationCodeConfiguration:
+          CodeContent:
+            S3ContentLocation:
+              BucketARN: !ImportValue FlinkCfDemoCodeBucketArn # Created and exported by the stack defined in flink-cf-demo-bucket-stack.yaml
+              FileKey: "my-stateful-functions-embedded-java-3.3.0.jar"
+          CodeContentType: 'ZIPFILE'
+  FlinkCfDemoCustomResource:
+    Description: Invokes FlinkCfDemoCRLambda to update and start the Flink application via API calls
+    Type: AWS::CloudFormation::CustomResource
+    DependsOn: FlinkCfDemoCRLambda
+    Version: "1.0"
+    Properties:
+      ServiceToken: !GetAtt FlinkCfDemoCRLambda.Arn
+      Region: !Ref AWS::Region
+      ApplicationName: !Ref FlinkCfDemoApplication
+      # LogStream ARN format: arn:aws:logs:REGION:ACCOUNT_NUMBER:log-group:LOG_GROUP_NAME:log-stream:LOG_STREAM_NAME
+      # We get most of this from the LogGroup ARN, then remove the trailing "*" and append "log-stream:LOG_STREAM_NAME"
+      LogStreamArn: !Join [ "", [ !Select [ 0, !Split [ "*", !GetAtt FlinkCfDemoLogGroup.Arn ] ], "log-stream:", !Ref FlinkCfDemoLogStream ] ]
+  FlinkCfDemoCRLambdaRole:
+    Type: AWS::IAM::Role
+    DependsOn:
+      - FlinkCfDemoApplication
+      - FlinkCfDemoLogStream
+    Properties:
+      Description: A role for the custom resource lambda to use while interacting with an application.
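+      # The lambda only calls Kinesis Analytics and CloudWatch Logs APIs, hence the two managed policies below.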
+ AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - sts:AssumeRole + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess + - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess + Path: / + FlinkCfDemoCRLambda: + Type: AWS::Lambda::Function + DependsOn: FlinkCfDemoCRLambdaRole + Properties: + Description: Configures logging and starts the Flink application + Runtime: python3.8 + Role: !GetAtt FlinkCfDemoCRLambdaRole.Arn + Handler: index.lambda_handler + Timeout: 30 + Code: + ZipFile: | + import logging + import cfnresponse + import boto3 + + logger = logging.getLogger() + logger.setLevel(logging.INFO) + + def lambda_handler(event, context): + logger.info('Incoming CFN event {}'.format(event)) + + try: + event_type = event['RequestType'] + resource_props = event['ResourceProperties'] + application_name = resource_props['ApplicationName'] + + # Ignore events other than Create or Update, + if event_type not in ['Create', 'Update']: + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}) + return + + # kinesisanalyticsv2 API reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesisanalyticsv2.html + client_kda = boto3.client('kinesisanalyticsv2', region_name=event['ResourceProperties']['Region']) + + describe_response = client_kda.describe_application(ApplicationName=application_name) + logger.info(f'describe_application response: {describe_response}') + + if event_type == 'Create': + # Add cloudwatch logging option + log_stream_arn = resource_props['LogStreamArn'] + conditional_token = describe_response['ApplicationDetail']['ConditionalToken'] + response = client_kda.add_application_cloud_watch_logging_option( + ApplicationName = application_name, + CloudWatchLoggingOption = { + 'LogStreamARN': log_stream_arn + }, + ConditionalToken = conditional_token + ) + logger.info(f'add_application_cloud_watch_logging_option response: {response}') + + # get application status. + application_status = describe_response['ApplicationDetail']['ApplicationStatus'] + + # an application can be started from 'READY' status only. + if application_status != 'READY': + logger.info('No-op for Application {} because ApplicationStatus {} is filtered'.format(application_name, application_status)) + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}) + + return + + # create RunConfiguration. + run_configuration = { + 'ApplicationRestoreConfiguration': { + 'ApplicationRestoreType': 'RESTORE_FROM_LATEST_SNAPSHOT', + } + } + + logger.info('RunConfiguration for Application {}: {}'.format(application_name, run_configuration)) + + # this call doesn't wait for an application to transfer to 'RUNNING' state. 
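+            # (If the stack needed to block until the application reaches RUNNING, this would be the
+            # place to poll describe_application; the custom resource instead reports SUCCESS as soon
+            # as the start request is accepted.)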
+ client_kda.start_application(ApplicationName=application_name, RunConfiguration=run_configuration) + logger.info('Started Application: {}'.format(application_name)) + cfnresponse.send(event, context, cfnresponse.SUCCESS, {}) + except Exception as err: + logger.error(err) + cfnresponse.send(event,context, cfnresponse.FAILED, {"Data": str(err)}) diff --git a/aws-crossplane/claims/demo-setup-claims.yaml b/aws-crossplane/claims/demo-setup-claims.yaml new file mode 100644 index 0000000..7b9e797 --- /dev/null +++ b/aws-crossplane/claims/demo-setup-claims.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: example.com/v1alpha1 +kind: S3Bucket +metadata: + name: flink-cp-demo-bucket + namespace: default +spec: + resourceConfig: + region: us-east-2 +--- +apiVersion: example.com/v1alpha1 +kind: KinesisStream +metadata: + name: flink-cp-demo-ingress + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-cp-demo-ingress + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: ken.ellinwood@imaginelearning.com + purpose: statefun-ingress +--- +apiVersion: example.com/v1alpha1 +kind: KinesisStream +metadata: + name: flink-cp-demo-egress + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: flink-cp-demo-egress + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: ken.ellinwood@imaginelearning.com + purpose: statefun-egress diff --git a/aws-crossplane/claims/managed-flink-claim.yaml b/aws-crossplane/claims/managed-flink-claim.yaml new file mode 100644 index 0000000..4744a9b --- /dev/null +++ b/aws-crossplane/claims/managed-flink-claim.yaml @@ -0,0 +1,40 @@ +apiVersion: example.com/v1alpha1 +kind: ManagedFlink +metadata: + name: flink-cp-demo + namespace: default +spec: + resourceConfig: + codeBucket: flink-cp-demo-bucket + codeFile: my-stateful-functions-embedded-java-3.3.0.jar + additionalPermissions: + managedPolicyArns: + - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + inlinePolicies: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress", + "arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:PutRecord" + ] + } + ] + } + environmentProperties: + - propertyGroup: + - propertyGroupId: StatefunApplicationProperties + propertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: flink-cp-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: flink-cp-demo-egress diff --git a/aws-crossplane/demo-send-events.sh b/aws-crossplane/demo-send-events.sh new file mode 100755 index 0000000..183e2b4 --- /dev/null +++ b/aws-crossplane/demo-send-events.sh @@ -0,0 +1,17 @@ +#! 
/bin/bash
+
+set -e
+MD5CMD=$(command -v md5 || command -v md5sum) # md5 on macOS, md5sum on Linux
+
+stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-cp-demo-ingress)
+if [ -z "$stream_name" ]; then
+    echo "Stream not found"
+    exit 1
+fi
+grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read line; do
+    partkey=$(echo $line | $MD5CMD | awk '{print $1}')
+    data=$(echo $line | base64)
+    cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data"
+    echo $cmd
+    eval $cmd
+done
diff --git a/aws-crossplane/demo-tail-egress.sh b/aws-crossplane/demo-tail-egress.sh
new file mode 100755
index 0000000..566e659
--- /dev/null
+++ b/aws-crossplane/demo-tail-egress.sh
@@ -0,0 +1,23 @@
+#! /bin/bash
+
+set -e
+
+# Get the events sent to the egress stream
+stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-cp-demo-egress)
+
+get_records_response=$(mktemp)
+
+shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId)
+shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator)
+while [ "true" ]; do
+    aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response
+    shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator)
+    record_count=0
+    for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do
+        record_count=$(expr $record_count + 1)
+        echo $encoded_data | base64 -d | jq .
+    done
+    if [ $record_count -eq 0 ]; then
+        sleep 2
+    fi
+done
diff --git a/aws-crossplane/demo-tail-logs.sh b/aws-crossplane/demo-tail-logs.sh
new file mode 100755
index 0000000..f1c4d5a
--- /dev/null
+++ b/aws-crossplane/demo-tail-logs.sh
@@ -0,0 +1,40 @@
+#! /bin/bash
+
+set -e
+
+cd $(dirname $0)
+
+NEXT_TOKEN_ARG=
+
+CWLOGS_DIR=.cwlogs
+mkdir -p $CWLOGS_DIR
+
+ITERATION=1
+
+if [ -f $CWLOGS_DIR/next.token ]; then
+    NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)"
+fi
+
+while true; do
+    CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json
+    aws logs get-log-events \
+        --start-from-head \
+        $NEXT_TOKEN_ARG \
+        --log-group-name flink-cp-demo-log-group \
+        --log-stream-name flink-cp-demo-log-stream \
+        >$CWLOG_FILE
+
+    NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken)
+    echo $NEXT_TOKEN >$CWLOGS_DIR/next.token
+    NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN"
+    EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length')
+
+    if [[ $EVENT_COUNT == 0 ]]; then
+        sleep 2
+        rm $CWLOG_FILE
+    else
+        cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log
+    fi
+
+    ITERATION=$(echo "1 + $ITERATION" | bc)
+done
diff --git a/aws-crossplane/launch-and-config-idp.sh b/aws-crossplane/launch-and-config-idp.sh
new file mode 100755
index 0000000..4860a00
--- /dev/null
+++ b/aws-crossplane/launch-and-config-idp.sh
@@ -0,0 +1,82 @@
+#!
/bin/bash + +# A helper script to launch and configure a local IDP +cd $(dirname $0) + +function main() { + + # Default to AWS, but allow localstack as well + cloud=${1:-aws} + if [ $cloud = "aws" ]; then + # Verify that the credentials are set and not expired + if grep =REPLACE local/aws/manifests/credentials.yaml >/dev/null || \ + grep 000000000000 local/aws/manifests/credentials.yaml >/dev/null || \ + [[ $(echo $(date +%s) - $(stat -f%m local/aws/manifests/credentials.yaml) | bc) -gt 43200 ]]; then + echo "The credentials in ./local/aws/manifests/credentials.yaml appear to have expired. Consider running ./local/aws/update_credentials.sh" + + # I haven't figured out how to refresh the credentials w/o restarting the cluster, so delete the cluster + if [ "$(idpbuilder get clusters)" ]; then + idpbuilder delete + fi + exit 1 + fi + fi + + if [ -z $(idpbuilder get clusters) ]; then + echo "Running: idpbuilder create -p local/$cloud" + idpbuilder create -p local/$cloud + echo + fi + + echo "Waiting for gitea to be ready..." + wait_for_pods gitea my-gitea + echo + + echo "Building and loading the docker image for the Managed Flink composition function" + ./xfn/configure-xfn.sh + echo + + echo "Waiting for the Crossplane AWS providers to be ready..." + wait_for_pods crossplane-system provider-aws + echo + + + echo "Waiting for the Crossplane AWS provider configs to be ready..." + until [[ $(kubectl get providerconfigs 2>&1 | grep aws | wc -l) -eq 2 ]]; do + sleep 2 + done + echo + + echo "Loading the Crossplane Composite Resource Definitions and Compositions" + for i in $(find resources -name \*xrd.yaml -o -name \*comp.yaml); do + kubectl apply -f $i + done + echo + + echo "The system is ready for claims to be applied" +} + +# Wait for pods in the given namespace to be running +function wait_for_pods() { + namespace=$1 + pod_name_prefix=$2 + + running=0 + total=0 + + until [[ $total != 0 && $total == $running ]]; do + + sleep 2 + running=0 + total=0 + + for i in $(kubectl -n ${namespace} get pods | grep $pod_name_prefix | grep -v Completed | awk '{print $3}'); do + if [ $i == "Running" ]; then + running=$(echo $running + 1 | bc) + fi + total=$(echo $total + 1 | bc) + done + done +} + +main "$@" diff --git a/aws-crossplane/local/aws/cert-creation/job.yaml b/aws-crossplane/local/aws/cert-creation/job.yaml new file mode 100644 index 0000000..bcd1e68 --- /dev/null +++ b/aws-crossplane/local/aws/cert-creation/job.yaml @@ -0,0 +1,57 @@ +# Get the certificate and create a configmap +apiVersion: batch/v1 +kind: Job +metadata: + name: cert-job + annotations: + argocd.argoproj.io/hook: PreSync +spec: + template: + spec: + serviceAccountName: cert-sa + containers: + - name: cert-container + image: bitnami/kubectl:1.30.3 + command: ["sh", "-c"] + args: + - |- + sleep 20 + cert=$(kubectl get secret -n default idpbuilder-cert -o go-template='{{range $k,$v := .data}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}') + kubectl create configmap cert -n crossplane-system --from-literal=ca.crt="$cert" || echo "failed to create configmap" + restartPolicy: Never +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-role + annotations: + argocd.argoproj.io/hook: PreSync +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "describe"] +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-role +subjects: +- kind: ServiceAccount + name: cert-sa + namespace: crossplane-system \ No newline at end of file diff --git a/aws-crossplane/local/aws/crossplane.yaml b/aws-crossplane/local/aws/crossplane.yaml new file mode 100644 index 0000000..5f1c2e1 --- /dev/null +++ b/aws-crossplane/local/aws/crossplane.yaml @@ -0,0 +1,35 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane + namespace: argocd + labels: + env: dev + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + sources: + - repoURL: 'https://charts.crossplane.io/stable' + targetRevision: 1.17.1 + helm: + releaseName: crossplane + values: | + args: + - "--enable-environment-configs" + registryCaBundleConfig: + name: "cert" + key: "ca.crt" + chart: crossplane + - repoURL: cnoe://cert-creation + targetRevision: HEAD + path: "." + destination: + server: 'https://kubernetes.default.svc' + namespace: crossplane-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/aws-services.yaml b/aws-crossplane/local/aws/manifests/aws-services.yaml new file mode 100644 index 0000000..53c4979 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/aws-services.yaml @@ -0,0 +1,63 @@ +apiVersion: pkg.crossplane.io/v1beta1 +kind: DeploymentRuntimeConfig +metadata: + name: debug-config +spec: + deploymentTemplate: + spec: + selector: {} + template: + spec: + containers: + - name: package-runtime + args: + - --debug +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-s3 +spec: + package: xpkg.upbound.io/upbound/provider-aws-s3:v1.21.1 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-iam +spec: + package: xpkg.upbound.io/upbound/provider-aws-iam:v1.21.1 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesis +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.21.1 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesisanalyticsv2 +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.21.1 + controllerConfigRef: + name: aws-config + runtimeConfigRef: + apiVersion: pkg.crossplane.io/v1beta1 + kind: DeploymentRuntimeConfig + name: debug-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchlogs +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.21.1 + controllerConfigRef: + name: aws-config diff --git a/aws-crossplane/local/aws/manifests/core.yaml b/aws-crossplane/local/aws/manifests/core.yaml new file mode 100644 index 0000000..8e1eca5 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/core.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: upbound-provider-family-aws +spec: + package: xpkg.upbound.io/upbound/provider-family-aws:v1.11.0 + controllerConfigRef: + name: aws-config +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-helm +spec: + package: 
xpkg.upbound.io/crossplane-contrib/provider-helm:v0.14.0 + controllerConfigRef: + name: provider-helm +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-kubernetes +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.7.0 + controllerConfigRef: + name: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-helm + namespace: crossplane-system +spec: + serviceAccountName: provider-helm +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-kubernetes + namespace: crossplane-system +spec: + serviceAccountName: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: aws-config +spec: + args: + - --debug + podSecurityContext: + fsGroup: 2000 + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret +--- +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: aws-provider + namespace: crossplane-system + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret +--- +apiVersion: kubernetes.crossplane.io/v1alpha1 +kind: ProviderConfig +metadata: + name: kubernetes-provider + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + source: InjectedIdentity \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/credentials.yaml b/aws-crossplane/local/aws/manifests/credentials.yaml new file mode 100644 index 0000000..7456042 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/credentials.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +stringData: + creds: | + [default] + aws_access_key_id=REPLACE + aws_secret_access_key=REPLACE + aws_session_token=REPLACE +kind: Secret +metadata: + creationTimestamp: null + name: aws-secret + namespace: crossplane-system +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig +metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" diff --git a/aws-crossplane/local/aws/manifests/functions.yaml b/aws-crossplane/local/aws/manifests/functions.yaml new file mode 100644 index 0000000..cedea8a --- /dev/null +++ b/aws-crossplane/local/aws/manifests/functions.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-auto-ready +spec: + package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.4.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: + package: gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + ignoreCrossplaneConstraints: true + skipDependencyResolution: true diff --git a/aws-crossplane/local/aws/manifests/provider-config.yaml b/aws-crossplane/local/aws/manifests/provider-config.yaml new file mode 100644 index 0000000..9c8900d --- /dev/null +++ 
b/aws-crossplane/local/aws/manifests/provider-config.yaml @@ -0,0 +1,14 @@ +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: provider-aws + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + secretRef: + key: creds + name: aws-secret + namespace: crossplane-system + source: Secret \ No newline at end of file diff --git a/aws-crossplane/local/aws/manifests/secret.yaml b/aws-crossplane/local/aws/manifests/secret.yaml new file mode 100644 index 0000000..3b0bfd0 --- /dev/null +++ b/aws-crossplane/local/aws/manifests/secret.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: init-secret + namespace: crossplane-system +stringData: + password: "password" \ No newline at end of file diff --git a/aws-crossplane/local/aws/providers.yaml b/aws-crossplane/local/aws/providers.yaml new file mode 100644 index 0000000..0e9694b --- /dev/null +++ b/aws-crossplane/local/aws/providers.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane-configs + namespace: argocd +spec: + destination: + namespace: crossplane-system + server: "https://kubernetes.default.svc" + source: + repoURL: cnoe://manifests + targetRevision: HEAD + path: "." + project: default + syncPolicy: + automated: + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/aws/update_credentials.sh b/aws-crossplane/local/aws/update_credentials.sh new file mode 100755 index 0000000..9f366d6 --- /dev/null +++ b/aws-crossplane/local/aws/update_credentials.sh @@ -0,0 +1,34 @@ +#! /bin/bash + +# Transfer account number and key/secret/session tokens to the local Crossplane AWS provider +cd $(dirname $0) + +if [ -z "$AWS_ACCOUNT" ]; then + # Attempt to resolve the account number via get-caller-identity + AWS_ACCOUNT=$(aws sts get-caller-identity --query Account --output text) + if [ -z "$AWS_ACCOUNT" ]; then + echo "AWS_ACCOUNT is not set, and finding it via \`aws sts get-caller-identity\` failed." + exit 1 + fi +fi + +required_vars="AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN" +for var in ${required_vars}; do + val=$(eval "echo \$$var") + if [ -z "$val" ]; then + echo "$var is not set" + exit 1 + fi +done + +git restore manifests/credentials.yaml + +cat manifests/credentials.yaml | \ + sed "s!000000000000!$AWS_ACCOUNT!" | \ + sed "s!aws_access_key_id=REPLACE!aws_access_key_id=$AWS_ACCESS_KEY_ID!" | \ + sed "s!aws_secret_access_key=REPLACE!aws_secret_access_key=$AWS_SECRET_ACCESS_KEY!" | \ + sed "s!aws_session_token=REPLACE!aws_session_token=$AWS_SESSION_TOKEN!" 
>manifests/credentials.yaml.tmp +mv manifests/credentials.yaml.tmp manifests/credentials.yaml + +echo "Run this command to clear env vars:" +echo "unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN" diff --git a/aws-crossplane/local/localstack/cert-creation/job.yaml b/aws-crossplane/local/localstack/cert-creation/job.yaml new file mode 100644 index 0000000..bcd1e68 --- /dev/null +++ b/aws-crossplane/local/localstack/cert-creation/job.yaml @@ -0,0 +1,57 @@ +# Get the certificate and create a configmap +apiVersion: batch/v1 +kind: Job +metadata: + name: cert-job + annotations: + argocd.argoproj.io/hook: PreSync +spec: + template: + spec: + serviceAccountName: cert-sa + containers: + - name: cert-container + image: bitnami/kubectl:1.30.3 + command: ["sh", "-c"] + args: + - |- + sleep 20 + cert=$(kubectl get secret -n default idpbuilder-cert -o go-template='{{range $k,$v := .data}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}') + kubectl create configmap cert -n crossplane-system --from-literal=ca.crt="$cert" || echo "failed to create configmap" + restartPolicy: Never +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-role + annotations: + argocd.argoproj.io/hook: PreSync +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "describe"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + argocd.argoproj.io/hook: PreSync + name: cert-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-role +subjects: +- kind: ServiceAccount + name: cert-sa + namespace: crossplane-system \ No newline at end of file diff --git a/aws-crossplane/local/localstack/configs/functions.yaml b/aws-crossplane/local/localstack/configs/functions.yaml new file mode 100644 index 0000000..cedea8a --- /dev/null +++ b/aws-crossplane/local/localstack/configs/functions.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-auto-ready +spec: + package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.4.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0 +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: + package: gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + ignoreCrossplaneConstraints: true + skipDependencyResolution: true diff --git a/aws-crossplane/local/localstack/configs/local-secret.yaml b/aws-crossplane/local/localstack/configs/local-secret.yaml new file mode 100644 index 0000000..92809e8 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/local-secret.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + name: local-secret + namespace: crossplane-system +stringData: + creds: | + [default] + aws_access_key_id = replaceme + aws_secret_access_key = replaceme + aws_session_token = replaceme +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig 
+metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" diff --git a/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml new file mode 100644 index 0000000..4b6369a --- /dev/null +++ b/aws-crossplane/local/localstack/configs/provider-config-localstack.yaml @@ -0,0 +1,30 @@ +apiVersion: aws.upbound.io/v1beta1 +kind: ProviderConfig +metadata: + name: provider-aws + annotations: + argocd.argoproj.io/sync-wave: "20" + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true +spec: + credentials: + source: Secret + secretRef: + namespace: crossplane-system + name: local-secret + key: creds + endpoint: + services: + - iam + - s3 + - kinesis + - cloudwatch + - logs + - kinesisanalyticsv2 + hostnameImmutable: true + url: + type: Static + static: http://localstack.localstack.svc.cluster.local:4566 + skip_credentials_validation: true + skip_metadata_api_check: true + skip_requesting_account_id: true + s3_use_path_style: true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/configs/providers.yaml b/aws-crossplane/local/localstack/configs/providers.yaml new file mode 100644 index 0000000..6a9aa43 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/providers.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-kubernetes +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-kubernetes:v0.7.0 + controllerConfigRef: + name: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-kubernetes + namespace: crossplane-system +spec: + serviceAccountName: provider-kubernetes +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: upbound-provider-family-aws +spec: + package: xpkg.upbound.io/upbound/provider-family-aws:v1.11.0 diff --git a/aws-crossplane/local/localstack/configs/services.yaml b/aws-crossplane/local/localstack/configs/services.yaml new file mode 100644 index 0000000..4f89245 --- /dev/null +++ b/aws-crossplane/local/localstack/configs/services.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-s3 +spec: + package: xpkg.upbound.io/upbound/provider-aws-s3:v1.11.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-cloudwatchlogs +spec: + package: xpkg.upbound.io/upbound/provider-aws-cloudwatchlogs:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-iam +spec: + package: xpkg.upbound.io/upbound/provider-aws-iam:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesis + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesis:v1.17.0 +--- +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-aws-kinesisanalyticsv2 + namespace: crossplane-system +spec: + package: xpkg.upbound.io/upbound/provider-aws-kinesisanalyticsv2:v1.17.0 diff --git a/aws-crossplane/local/localstack/crossplane-configs.yaml b/aws-crossplane/local/localstack/crossplane-configs.yaml new file mode 100644 index 0000000..0d57279 --- /dev/null +++ b/aws-crossplane/local/localstack/crossplane-configs.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane-configs + namespace: argocd +spec: + destination: + namespace: crossplane-system + server: 
"https://kubernetes.default.svc" + source: + repoURL: cnoe://configs + targetRevision: HEAD + path: "." + project: default + syncPolicy: + automated: + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/crossplane.yaml b/aws-crossplane/local/localstack/crossplane.yaml new file mode 100644 index 0000000..f2e50c5 --- /dev/null +++ b/aws-crossplane/local/localstack/crossplane.yaml @@ -0,0 +1,33 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crossplane + namespace: argocd + labels: + env: dev + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + sources: + - repoURL: 'https://charts.crossplane.io/stable' + targetRevision: 1.17.1 + helm: + releaseName: crossplane + values: | + registryCaBundleConfig: + name: "cert" + key: "ca.crt" + chart: crossplane + - repoURL: cnoe://cert-creation + targetRevision: HEAD + path: "." + destination: + server: 'https://kubernetes.default.svc' + namespace: crossplane-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/localstack.yaml b/aws-crossplane/local/localstack/localstack.yaml new file mode 100644 index 0000000..1f8d06b --- /dev/null +++ b/aws-crossplane/local/localstack/localstack.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: localstack + namespace: argocd + labels: + example: localstack-integration +spec: + project: default + source: + repoURL: https://localstack.github.io/helm-charts + targetRevision: 0.6.12 + chart: localstack + helm: + releaseName: localstack + destination: + server: "https://kubernetes.default.svc" + namespace: localstack + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/aws-crossplane/local/localstack/port-forward-idp-localstack.sh b/aws-crossplane/local/localstack/port-forward-idp-localstack.sh new file mode 100644 index 0000000..6fd2316 --- /dev/null +++ b/aws-crossplane/local/localstack/port-forward-idp-localstack.sh @@ -0,0 +1,7 @@ +#! 
/bin/sh + +# Port-forward port 4566 to the LocalStack service running in the Kubernetes cluster +# This script requires the jq command: https://jqlang.github.io/jq/ + +localstack_pod_name=$(kubectl -n localstack get pods -o json | jq -cr .items[0].metadata.name) +kubectl -n localstack port-forward $localstack_pod_name 4566:4566 diff --git a/aws-crossplane/resources/flink/flink-comp.yaml b/aws-crossplane/resources/flink/flink-comp.yaml new file mode 100644 index 0000000..fd6abfd --- /dev/null +++ b/aws-crossplane/resources/flink/flink-comp.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: managedflink.example.com +spec: + compositeTypeRef: + apiVersion: example.com/v1alpha1 + kind: XManagedFlink + mode: Pipeline + pipeline: + - step: environment-configs + functionRef: + name: function-environment-configs + input: + apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 + kind: Input + spec: + environmentConfigs: + - type: Reference + ref: + name: aws-env-config + - step: render + functionRef: + name: function-managed-flink + - step: auto-ready-composite-resource # Auto-sets ready on the XR when the composed resources are ready + functionRef: + name: function-auto-ready diff --git a/aws-crossplane/resources/flink/flink-example-claim.yaml b/aws-crossplane/resources/flink/flink-example-claim.yaml new file mode 100644 index 0000000..2dc4327 --- /dev/null +++ b/aws-crossplane/resources/flink/flink-example-claim.yaml @@ -0,0 +1,42 @@ +apiVersion: example.com/v1alpha1 +kind: ManagedFlink +metadata: + name: flink-demo + namespace: default +spec: + resourceConfig: + codeBucket: flink-demo-bucket + codeFile: my-stateful-functions-embedded-java-3.3.0.jar + # The composition will generate a role with basic permissions for the application (e.g. logging, metrics), but + # it doesn't know what additional permissions the application needs. You can specify them here... 
+ additionalPermissions: + managedPolicyArns: + - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + inlinePolicies: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:PutRecord" + ] + } + ] + } + environmentProperties: + - propertyGroup: + - propertyGroupId: StatefunApplicationProperties + propertyMap: + EVENTS_INGRESS_STREAM_DEFAULT: flink-demo-ingress + EVENTS_EGRESS_STREAM_DEFAULT: flink-demo-egress diff --git a/aws-crossplane/resources/flink/flink-xrd.yaml b/aws-crossplane/resources/flink/flink-xrd.yaml new file mode 100644 index 0000000..cf6042b --- /dev/null +++ b/aws-crossplane/resources/flink/flink-xrd.yaml @@ -0,0 +1,96 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xmanagedflinks.example.com +spec: + group: example.com + names: + kind: XManagedFlink + plural: xmanagedflinks + claimNames: + kind: ManagedFlink + plural: managedflinks + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + resourceConfig: + type: object + properties: + region: + type: string + codeBucket: + type: string + codeFile: + type: string + runtime: + type: string + snapshotsEnabled: + type: string + checkpointingEnabled: + type: string + checkpointIntervalMillis: + type: number + logLevel: + type: string + metricsLevel: + type: string + autoScalingEnabled: + type: boolean + parallelism: + type: number + parallelismPerKpu: + type: number + applicationRestoreType: + type: string + snapshotName: + type: string + allowNonRestoredState: + type: boolean + additionalPermissions: + type: object + properties: + managedPolicyArns: + type: array + items: + type: string + inlinePolicies: + type: array + items: + type: object + properties: + name: + type: string + policy: + type: string + items: + type: string + environmentProperties: + type: array + items: + type: object + properties: + propertyGroup: + type: array + items: + type: object + properties: + propertyGroupId: + type: string + propertyMap: + type: object + additionalProperties: + type: string + required: + - codeBucket + - codeFile + - environmentProperties + required: + - resourceConfig diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml new file mode 100644 index 0000000..ec1d87f --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-comp.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: kinesisstreams.example.com +spec: + compositeTypeRef: + apiVersion: example.com/v1alpha1 + kind: XKinesisStream + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: kinesis-stream + base: + apiVersion: kinesis.aws.upbound.io/v1beta1 + kind: Stream + metadata: + annotations: + meta.upbound.io/example-id: kinesis/v1beta1/stream + name: example + labels: + foo: bar + spec: + forProvider: + region: us-east-2 + retentionPeriod: 24 + shardCount: 1 + shardLevelMetrics: + - 
IncomingBytes + - OutgoingBytes + streamModeDetails: + - streamMode: PROVISIONED + tags: + createdBy: unknown-person + purpose: unknown-purpose + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.name + toFieldPath: metadata.name + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.retentionPeriod + toFieldPath: spec.forProvider.retentionPeriod + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.streamMode + toFieldPath: spec.forProvider.streamModeDetails[0].streamMode + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.shardCount + toFieldPath: spec.forProvider.shardCount + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.tags.createdBy + toFieldPath: spec.forProvider.tags.createdBy + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.tags.purpose + toFieldPath: spec.forProvider.tags.purpose + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.streamArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.streamName diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml new file mode 100644 index 0000000..e38e504 --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-example-claim.yaml @@ -0,0 +1,15 @@ +apiVersion: example.com/v1alpha1 +kind: KinesisStream +metadata: + name: my-kinesis-stream + namespace: default +spec: + resourceConfig: + region: us-east-2 + name: my-kinesis-stream + streamMode: PROVISIONED + shardCount: 1 + retentionPeriod: 26 + tags: + createdBy: bart.simpson@the_simpsons.com + purpose: to demonstrate creation of a Kinesis stream \ No newline at end of file diff --git a/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml new file mode 100644 index 0000000..82b5a29 --- /dev/null +++ b/aws-crossplane/resources/kinesis/kinesis-stream-xrd.yaml @@ -0,0 +1,61 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xkinesisstreams.example.com +spec: + group: example.com + names: + kind: XKinesisStream + plural: xkinesisstreams + claimNames: + kind: KinesisStream + plural: kinesisstreams + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + description: KinesisStreamSpec defines the desired state of the stream + properties: + resourceConfig: + description: ResourceConfig defines general properties of this AWS resource. 
+ properties: + region: + type: string + description: region for the stream, required + name: + type: string + streamMode: + type: string + enum: + - PROVISIONED + - ON_DEMAND + description: the capacity mode of the stream, must be PROVISIONED or ON_DEMAND + shardCount: + type: number + description: The number of shards when streamMode=PROVISIONED + retentionPeriod: + type: number + description: Retention period in hours, default is 24 + tags: + description: tags to apply to the stream + type: object + properties: + createdBy: + type: string + purpose: + type: string + type: object + type: object + status: + description: KinesisStreamStatus defines the observed state of stream + properties: + streamName: + type: string + streamArn: + type: string + type: object + type: object diff --git a/aws-crossplane/resources/s3/s3-bucket-comp.yaml b/aws-crossplane/resources/s3/s3-bucket-comp.yaml new file mode 100644 index 0000000..046e7d6 --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-comp.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: s3buckets.example.com +spec: + compositeTypeRef: + apiVersion: example.com/v1alpha1 + kind: XS3Bucket + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: s3-bucket + base: + apiVersion: s3.aws.upbound.io/v1beta1 + kind: Bucket + metadata: + annotations: + meta.upbound.io/example-id: s3/v1beta1/bucket + name: example + spec: + forProvider: + region: us-east-2 + providerConfigRef: + name: provider-aws + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: metadata.name + - type: FromCompositeFieldPath + fromFieldPath: spec.resourceConfig.region + toFieldPath: spec.forProvider.region + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.arn + toFieldPath: status.bucketArn + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.id + toFieldPath: status.bucketName diff --git a/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml new file mode 100644 index 0000000..0d4ec6c --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-example-claim.yaml @@ -0,0 +1,8 @@ +apiVersion: example.com/v1alpha1 +kind: S3Bucket +metadata: + name: flink-demo-bucket + namespace: default +spec: + resourceConfig: + region: us-east-2 diff --git a/aws-crossplane/resources/s3/s3-bucket-xrd.yaml b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml new file mode 100644 index 0000000..1bee0a7 --- /dev/null +++ b/aws-crossplane/resources/s3/s3-bucket-xrd.yaml @@ -0,0 +1,35 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xs3buckets.example.com +spec: + group: example.com + names: + kind: XS3Bucket + plural: xs3buckets + claimNames: + kind: S3Bucket + plural: s3buckets + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + properties: + spec: + properties: + resourceConfig: + properties: + region: + type: string + type: object + type: object + status: + properties: + bucketName: + type: string + bucketArn: + type: string + type: object + type: object diff --git a/aws-crossplane/xfn/.gitignore b/aws-crossplane/xfn/.gitignore new file mode 100644 index 0000000..0e34e68 --- /dev/null +++ b/aws-crossplane/xfn/.gitignore @@ -0,0 +1,22 @@ +# If you prefer the allow list template 
instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +function-managed-flink.code-workspace diff --git a/aws-crossplane/xfn/.golangci.yml b/aws-crossplane/xfn/.golangci.yml new file mode 100644 index 0000000..1cbfbd0 --- /dev/null +++ b/aws-crossplane/xfn/.golangci.yml @@ -0,0 +1,208 @@ +run: + timeout: 10m + + skip-files: + - "zz_generated\\..+\\.go$" + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + # [deprecated] comma-separated list of pairs of the form pkg:regex + # the regex is used to ignore names within pkg. (default "fmt:.*"). + # see https://github.com/kisielk/errcheck#the-deprecated-method for details + ignore: fmt:.*,io/ioutil:^Read.* + + govet: + # report about shadowed variables + check-shadowing: false + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + + gci: + custom-order: true + sections: + - standard + - default + - prefix(github.com/crossplane) + - prefix(github.com/crossplane-contrib) + - blank + - dot + + gocyclo: + # minimal code complexity to report, 30 by default (but we recommend 10-20) + min-complexity: 10 + + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + + dupl: + # tokens count to trigger issue, 150 by default + threshold: 100 + + goconst: + # minimal length of string constant, 3 by default + min-len: 3 + # minimal occurrences count to trigger, 3 by default + min-occurrences: 5 + + lll: + # tab width in spaces. Default to 1. + tab-width: 1 + + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 30 + + prealloc: + # XXX: we don't recommend using this linter before doing performance profiling. + # For most programs usage of prealloc will be a premature optimization. + + # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # True by default. + simple: true + range-loops: true # Report preallocation suggestions on range loops, true by default + for-loops: false # Report preallocation suggestions on for loops, false by default + + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. 
+ # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - performance + + settings: # settings passed to gocritic + captLocal: # must be valid enabled check name + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 + + nolintlint: + require-explanation: true + require-specific: true + + +linters: + enable: + - megacheck + - govet + - gocyclo + - gocritic + - goconst + - gci + - gofmt # We enable this as well as goimports for its simplify mode. + - prealloc + - revive + - unconvert + - misspell + - nakedret + - nolintlint + + disable: + # These linters are all deprecated as of golangci-lint v1.49.0. We disable + # them explicitly to avoid the linter logging deprecation warnings. + - deadcode + - varcheck + - scopelint + - structcheck + - interfacer + + presets: + - bugs + - unused + fast: false + + +issues: + # Excluding configuration per-path and per-linter + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test(ing)?\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + - scopelint + - unparam + + # Ease some gocritic warnings on test files. + - path: _test\.go + text: "(unnamedResult|exitAfterDefer)" + linters: + - gocritic + + # These are performance optimisations rather than style issues per se. + # They warn when function arguments or range values copy a lot of memory + # rather than using a pointer. + - text: "(hugeParam|rangeValCopy):" + linters: + - gocritic + + # This "TestMain should call os.Exit to set exit code" warning is not clever + # enough to notice that we call a helper method that calls os.Exit. + - text: "SA3000:" + linters: + - staticcheck + + - text: "k8s.io/api/core/v1" + linters: + - goimports + + # This is a "potential hardcoded credentials" warning. It's triggered by + # any variable with 'secret' in the same, and thus hits a lot of false + # positives in Kubernetes land where a Secret is an object type. + - text: "G101:" + linters: + - gosec + - gas + + # This is an 'errors unhandled' warning that duplicates errcheck. + - text: "G104:" + linters: + - gosec + - gas + + # Some k8s dependencies do not have JSON tags on all fields in structs. + - path: k8s.io/ + linters: + - musttag + + # Independently from option `exclude` we use default exclude patterns, + # it can be disabled by this option. To list all + # excluded by default patterns execute `golangci-lint run --help`. + # Default value for this option is true. + exclude-use-default: false + + # Show only new issues: if there are unstaged changes or untracked files, + # only those changes are analyzed, else only changes in HEAD~ are analyzed. + # It's a super-useful option for integration of golangci-lint into existing + # large codebase. It's not practical to fix all existing issues at the moment + # of integration: much better don't allow issues in new code. + # Default is false. + new: false + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/aws-crossplane/xfn/Dockerfile b/aws-crossplane/xfn/Dockerfile new file mode 100644 index 0000000..6ce07b1 --- /dev/null +++ b/aws-crossplane/xfn/Dockerfile @@ -0,0 +1,35 @@ +FROM golang:1.23 as build-stage + +WORKDIR /fn + +COPY . . +RUN go mod download + + +RUN CGO_ENABLED=0 go build -o /function . + +FROM debian:12.1-slim as package-stage + +# TODO(negz): Use a proper Crossplane package building tool. 
We're abusing the +# fact that this image won't have an io.crossplane.pkg: base annotation. This +# means Crossplane package manager will pull this entire ~100MB image, which +# also happens to contain a valid Function runtime. +# https://github.com/crossplane/crossplane/blob/v1.13.2/contributing/specifications/xpkg.md +WORKDIR /package +COPY package/ ./ + +RUN cat crossplane.yaml > /package.yaml +RUN cat input/*.yaml >> /package.yaml + +FROM gcr.io/distroless/base-debian11 AS build-release-stage + +WORKDIR / + +COPY --from=build-stage /function /function +COPY --from=package-stage /package.yaml /package.yaml + +EXPOSE 9443 + +USER nonroot:nonroot + +ENTRYPOINT ["/function", "--debug"] diff --git a/aws-crossplane/xfn/LICENSE b/aws-crossplane/xfn/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/aws-crossplane/xfn/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/aws-crossplane/xfn/NOTES.txt b/aws-crossplane/xfn/NOTES.txt new file mode 100644 index 0000000..7af36b2 --- /dev/null +++ b/aws-crossplane/xfn/NOTES.txt @@ -0,0 +1,9 @@ +To get started: + +1. Replace `function-template-go` with your function in `go.mod`, + `package/crossplane.yaml`, and any Go imports. (You can also do this + automatically by running the `./init.sh ` script.) +2. Update `input/v1beta1/` to reflect your desired input (and run `go generate ./...`) +3. Add your logic to `RunFunction` in `fn.go` +4. Add tests for your logic in `fn_test.go` +5. Update `README.md`, to be about your function! diff --git a/aws-crossplane/xfn/README.md b/aws-crossplane/xfn/README.md new file mode 100644 index 0000000..cdf7fb4 --- /dev/null +++ b/aws-crossplane/xfn/README.md @@ -0,0 +1,44 @@ +# function-template-go +[![CI](https://github.com/crossplane/function-template-go/actions/workflows/ci.yml/badge.svg)](https://github.com/crossplane/function-template-go/actions/workflows/ci.yml) + +A template for writing a [composition function][functions] in [Go][go]. + +To learn how to use this template: + +* [Follow the guide to writing a composition function in Go][function guide] +* [Learn about how composition functions work][functions] +* [Read the function-sdk-go package documentation][package docs] + +If you just want to jump in and get started: + +1. Replace `function-template-go` with your function in `go.mod`, + `package/crossplane.yaml`, and any Go imports. (You can also do this + automatically by running the `./init.sh ` script.) +1. Update `input/v1beta1/` to reflect your desired input (and run `go generate ./...`) +1. Add your logic to `RunFunction` in `fn.go` +1. Add tests for your logic in `fn_test.go` +1. Update this file, `README.md`, to be about your function! + +This template uses [Go][go], [Docker][docker], and the [Crossplane CLI][cli] to +build functions. + +```shell +# Run code generation - see input/generate.go +$ go generate ./... + +# Run tests - see fn_test.go +$ go test ./... + +# Build the function's runtime image - see Dockerfile +$ docker build . 
--tag=runtime + +# Build a function package - see package/crossplane.yaml +$ crossplane xpkg build -f package --embed-runtime-image=runtime +``` + +[functions]: https://docs.crossplane.io/latest/concepts/composition-functions +[go]: https://go.dev +[function guide]: https://docs.crossplane.io/knowledge-base/guides/write-a-composition-function-in-go +[package docs]: https://pkg.go.dev/github.com/crossplane/function-sdk-go +[docker]: https://www.docker.com +[cli]: https://docs.crossplane.io/latest/cli diff --git a/aws-crossplane/xfn/configure-xfn.sh b/aws-crossplane/xfn/configure-xfn.sh new file mode 100755 index 0000000..142866e --- /dev/null +++ b/aws-crossplane/xfn/configure-xfn.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Run this script to deploy the function to the local running IDP +cd $(dirname $0) + +go generate ./... + +# Build the Docker image +docker build -t function-managed-flink . + +# Tag the image with the Gitea server URL +docker tag function-managed-flink gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 + +# Log in to the Gitea server +idpbuilder get secrets -p gitea -o json | jq -r '.[0].data.password' | docker login -u giteaAdmin --password-stdin gitea.cnoe.localtest.me:8443 + +# Push the image to the Gitea server +docker push gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 diff --git a/aws-crossplane/xfn/example/README.md b/aws-crossplane/xfn/example/README.md new file mode 100644 index 0000000..8b6a134 --- /dev/null +++ b/aws-crossplane/xfn/example/README.md @@ -0,0 +1,25 @@ +# Example manifests + +You can run your function locally and test it using `crossplane beta render` +with these example manifests. + +```shell +# Run the function locally +$ go run . --insecure --debug +``` + +```shell +# Then, in another terminal, call it with these example manifests +$ crossplane beta render xr.yaml composition.yaml functions.yaml -r +--- +apiVersion: example.crossplane.io/v1 +kind: XR +metadata: + name: example-xr +--- +apiVersion: render.crossplane.io/v1beta1 +kind: Result +message: I was run with input "Hello world"! 
+severity: SEVERITY_NORMAL +step: run-the-template +``` diff --git a/aws-crossplane/xfn/example/composition.yaml b/aws-crossplane/xfn/example/composition.yaml new file mode 100644 index 0000000..da038b0 --- /dev/null +++ b/aws-crossplane/xfn/example/composition.yaml @@ -0,0 +1,27 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: function-managed-flink +spec: + compositeTypeRef: + apiVersion: example.crossplane.io/v1 + kind: XR + mode: Pipeline + pipeline: + - step: environment-configs + functionRef: + name: function-environment-configs + input: + apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 + kind: Input + spec: + environmentConfigs: + - type: Reference + ref: + name: aws-env-config + - step: run-the-function + functionRef: + name: function-managed-flink + input: + apiVersion: template.fn.crossplane.io/v1beta1 + kind: Input diff --git a/aws-crossplane/xfn/example/extra-resources.yaml b/aws-crossplane/xfn/example/extra-resources.yaml new file mode 100644 index 0000000..41428ee --- /dev/null +++ b/aws-crossplane/xfn/example/extra-resources.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: apiextensions.crossplane.io/v1alpha1 +kind: EnvironmentConfig +metadata: + name: aws-env-config +data: + awsAccountID: "000000000000" diff --git a/aws-crossplane/xfn/example/functions.yaml b/aws-crossplane/xfn/example/functions.yaml new file mode 100644 index 0000000..69ee0e2 --- /dev/null +++ b/aws-crossplane/xfn/example/functions.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: pkg.crossplane.io/v1 +kind: Function +metadata: + name: function-environment-configs +spec: + package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 +--- +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink + annotations: + # This tells crossplane beta render to connect to the function locally. + render.crossplane.io/runtime: Development +spec: + # This is ignored when using the Development runtime. 
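+  # Outside of Development mode, package must reference a pushed function image instead,
+  # e.g. the gitea.cnoe.localtest.me:8443/giteaadmin/function-managed-flink:1 tag that
+  # configure-xfn.sh builds and pushes.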
+ package: function-managed-flink diff --git a/aws-crossplane/xfn/example/xr.yaml b/aws-crossplane/xfn/example/xr.yaml new file mode 100644 index 0000000..09f0579 --- /dev/null +++ b/aws-crossplane/xfn/example/xr.yaml @@ -0,0 +1,47 @@ +apiVersion: example.com/v1alpha1 +kind: XManagedFlink +metadata: + name: flink-example-nkhnm +spec: + claimRef: + apiVersion: example.com/v1alpha1 + kind: ManagedFlink + name: flink-example + namespace: default + resourceConfig: + region: us-east-2 + account: "000000000000" + codeBucket: flink-example-bucket + codeFile: example-flink-app-1.0-SNAPSHOT.jar + additionalPermissions: + managedPolicyArns: + - "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + inlinePolicies: + - name: kinesis_policy + policy: | + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-ingress", + "arn:aws:kinesis:us-east-2:000000000000:stream/flink-demo-egress" + ], + "Action": [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:PutRecord" + ] + } + ] + } + environmentProperties: + - propertyGroup: + - propertyGroupId: MyApplicationProperties + propertyMap: + EVENTS_EGRESS_STREAM_DEFAULT: example-egress-kinesis-stream + EVENTS_INGRESS_STREAM_DEFAULT: example-ingress-kinesis-stream + parallelism: 1 diff --git a/aws-crossplane/xfn/fn.go b/aws-crossplane/xfn/fn.go new file mode 100644 index 0000000..1160b50 --- /dev/null +++ b/aws-crossplane/xfn/fn.go @@ -0,0 +1,499 @@ +package main + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/fieldpath" + "github.com/crossplane/crossplane-runtime/pkg/logging" + fncontext "github.com/crossplane/function-sdk-go/context" + fnv1 "github.com/crossplane/function-sdk-go/proto/v1" + "github.com/crossplane/function-sdk-go/request" + "github.com/crossplane/function-sdk-go/resource" + "github.com/crossplane/function-sdk-go/response" +) + +// Function renders the AWS Managed Flink composed resources (application, IAM role, log group, and log stream). +type Function struct { + fnv1.UnimplementedFunctionRunnerServiceServer + + log logging.Logger +} + +func getEnvironmentConfig(req *fnv1.RunFunctionRequest) (*unstructured.Unstructured, error) { + env := &unstructured.Unstructured{} + if v, ok := request.GetContextKey(req, fncontext.KeyEnvironment); ok { + if err := resource.AsObject(v.GetStructValue(), env); err != nil { + return env, fmt.Errorf("cannot get Composition environment from %T context key %q", req, fncontext.KeyEnvironment) + } + } + return env, nil +} + +// Helper function to create an array containing a map. In Unstructured, the arrays must be of type []interface{}, +// otherwise the Unstructured can't be converted to Struct. 
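+// A minimal sketch of the distinction (the "good" and "bad" names are illustrative only):
+//
+//	good := []interface{}{map[string]interface{}{"snapshotsEnabled": true}} // converts cleanly
+//	bad := []map[string]interface{}{{"snapshotsEnabled": true}}             // structpb conversion rejects this type
+//
+// This is why the generators below always build nested arrays through these helpers.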
+func arrayWithMap(value map[string]interface{}) []interface{} { + result := make([]interface{}, 1) + result[0] = value + return result +} + +func arrayWithMaps(values []map[string]interface{}) []interface{} { + result := make([]interface{}, len(values)) + for i, v := range values { + result[i] = v + } + return result +} + +// Get a value from the composite at the given path, and if not found return the defaultValue +func getValue(oxr *resource.Composite, path string, defaultValue any) (any, error) { + v, err := oxr.Resource.GetValue(path) + if err != nil { + if fieldpath.IsNotFound(err) { + return defaultValue, nil + } + } + return v, err +} + +func getArrayValue(oxr *resource.Composite, path string, defaultValue []interface{}) ([]interface{}, error) { + v, err := oxr.Resource.GetValue(path) + if err != nil { + if fieldpath.IsNotFound(err) { + return defaultValue, nil + } + } + array, ok := v.([]interface{}) + if ok { + return array, nil + } + return nil, errors.Errorf("Value at %s is not an array", path) +} + +const FLINK_APP_RESOURCE_NAME resource.Name = "flink-application" +const LOG_GROUP_RESOURCE_NAME resource.Name = "flink-log-group" +const LOG_STREAM_RESOURCE_NAME resource.Name = "flink-log-stream" +const ROLE_RESOURCE_NAME resource.Name = "flink-role" + +func RenderManagedFlinkResources(req *fnv1.RunFunctionRequest, rsp *fnv1.RunFunctionResponse, oxr *resource.Composite, log logging.Logger) (*fnv1.RunFunctionRequest, *fnv1.RunFunctionResponse) { + + desired, err := request.GetDesiredComposedResources(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req)) + return req, rsp + } + + observed, err := request.GetObservedComposedResources(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot get observed composed resources in %T", req)) + return req, rsp + } + + envConfig, err := getEnvironmentConfig(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot get env config in %T", rsp)) + return req, rsp + } + + err = GenerateManagedFlink(rsp, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render ManagedFlink resource")) + return req, rsp + } + + err = GenerateRole(rsp, envConfig, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render Role resource")) + return req, rsp + } + + err = GenerateLogGroup(rsp, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render LogGroup resource")) + return req, rsp + } + + err = GenerateLogStream(rsp, desired, observed, oxr, log) + if err != nil { + response.Fatal(rsp, errors.Wrap(err, "failed to render LogStream resource")) + return req, rsp + } + if err := response.SetDesiredComposedResources(rsp, desired); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composed resources in %T", rsp)) + return req, rsp + } + + oxr.Resource.SetValue("metadata.managedFields", nil) + + if err := response.SetDesiredCompositeResource(rsp, oxr); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composite resource in %T", rsp)) + return req, rsp + } + + return req, rsp +} + +func GenerateManagedFlink(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error { + // Fetch required values from oxr.spec.resourceConfig + codeBucket, err := 
oxr.Resource.GetValue("spec.resourceConfig.codeBucket") + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeBucket from %T", oxr)) + return err + } + codeFile, err := oxr.Resource.GetValue("spec.resourceConfig.codeFile") + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.codeFile from %T", oxr)) + return err + } + environmentProperties, err := oxr.Resource.GetValue("spec.resourceConfig.environmentProperties") + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.environmentProperties from %T", oxr)) + return err + } + + // Fetch optional values from oxr.spec.resourceConfig + region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") + startApplication, _ := getValue(oxr, "spec.resourceConfig.startApplication", true) + runtimeEnvironment, _ := getValue(oxr, "spec.resourceConfig.runtime", "FLINK-1_18") + snapshotsEnabled, _ := getValue(oxr, "spec.resourceConfig.snapshotsEnabled", true) + checkpointingEnabled, _ := getValue(oxr, "spec.resourceConfig.checkpointingEnabled", true) + checkpointIntervalMillis, _ := getValue(oxr, "spec.resourceConfig.checkpointIntervalMillis", 300000) // 5 minutes by default + logLevel, _ := getValue(oxr, "spec.resourceConfig.logLevel", "INFO") + metricsLevel, _ := getValue(oxr, "spec.resourceConfig.metricsLevel", "TASK") + autoScalingEnabled, _ := getValue(oxr, "spec.resourceConfig.autoScalingEnabled", false) + parallelism, _ := getValue(oxr, "spec.resourceConfig.parallelism", 1) + parallelismPerKpu, _ := getValue(oxr, "spec.resourceConfig.parallelismPerKpu", 1) + applicationRestoreType, _ := getValue(oxr, "spec.resourceConfig.applicationRestoreType", "RESTORE_FROM_LATEST_SNAPSHOT") + snapshotName, _ := getValue(oxr, "spec.resourceConfig.snapshotName", nil) + allowNonRestoredState, _ := getValue(oxr, "spec.resourceConfig.allowNonRestoredState", false) + + flinkAppDesired := resource.NewDesiredComposed() + desired[FLINK_APP_RESOURCE_NAME] = flinkAppDesired + + // Traverse environmentProperties and set AWS_REGION in all propertyMaps + epa := environmentProperties.([]interface{}) + for _, v := range epa { + epm := v.(map[string]interface{}) + pga := epm["propertyGroup"].([]interface{}) + for _, p := range pga { + pgm := p.(map[string]interface{}) + propertyMap := pgm["propertyMap"].(map[string]interface{}) + propertyMap["AWS_REGION"] = region + } + } + appRestoreConfig := map[string]interface{}{ + "applicationRestoreType": applicationRestoreType, + } + if snapshotName != nil { + appRestoreConfig["snapshotName"] = snapshotName + } + + flinkAppDesired.Resource.Object = map[string]interface{}{ + "apiVersion": "kinesisanalyticsv2.aws.upbound.io/v1beta1", + "kind": "Application", + "metadata": map[string]interface{}{ + "name": oxr.Resource.GetClaimReference().Name, + }, + "spec": map[string]interface{}{ + "deletionPolicy": "Delete", // "Orphan", + "forProvider": map[string]interface{}{ + "region": region, + "runtimeEnvironment": runtimeEnvironment, // "FLINK-1_18", + "applicationMode": "STREAMING", + "startApplication": startApplication, + "serviceExecutionRoleSelector": map[string]interface{}{ + "matchControllerRef": true, + }, + "applicationConfiguration": arrayWithMap(map[string]interface{}{ + "applicationCodeConfiguration": arrayWithMap(map[string]interface{}{ + "codeContentType": "ZIPFILE", + "codeContent": arrayWithMap(map[string]interface{}{ + "s3ContentLocation": arrayWithMap(map[string]interface{}{ + "fileKey": codeFile, + 
"bucketArnSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "crossplane.io/claim-name": codeBucket, + }, + }, + }), + }), + }), + "applicationSnapshotConfiguration": arrayWithMap(map[string]interface{}{ + "snapshotsEnabled": snapshotsEnabled, + }), + "environmentProperties": environmentProperties, + "flinkApplicationConfiguration": arrayWithMap(map[string]interface{}{ + "checkpointConfiguration": arrayWithMap(map[string]interface{}{ + "checkpointInterval": checkpointIntervalMillis, + "checkpointingEnabled": checkpointingEnabled, + "configurationType": "CUSTOM", + }), + "monitoringConfiguration": arrayWithMap(map[string]interface{}{ + "logLevel": logLevel, + "metricsLevel": metricsLevel, + "configurationType": "CUSTOM", + }), + "parallelismConfiguration": arrayWithMap(map[string]interface{}{ + "autoScalingEnabled": autoScalingEnabled, + "parallelism": parallelism, + "parallelismPerKpu": parallelismPerKpu, + "configurationType": "CUSTOM", + }), + }), + "runConfiguration": arrayWithMap(map[string]interface{}{ + "applicationRestoreConfiguration": arrayWithMap(appRestoreConfig), + "flinkRunConfiguration": arrayWithMap(map[string]interface{}{ + "allowNonRestoredState": allowNonRestoredState, + }), + }), + }), + // NOTE: For now, don't set cloudWatchLoggingOptions as a workaround for the endless + // Updating loop (https://github.com/crossplane-contrib/provider-upjet-aws/issues/1419) + + // "cloudwatchLoggingOptions": arrayWithMap(map[string]interface{}{ + // "logStreamArnSelector": map[string]interface{}{ + // "matchControllerRef": true, + // }, + // }), + }, + "providerConfigRef": map[string]interface{}{ + "name": "provider-aws", + }, + }, + } + + return nil // No error == Success +} + + +func GenerateLogGroup(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error { + logGroupDesired := resource.NewDesiredComposed() + desired[LOG_GROUP_RESOURCE_NAME] = logGroupDesired + + // Fetch optional values from oxr.spec.resourceConfig + region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") + logGroupName, _ := getValue(oxr, "spec.resourceConfig.logGroupName", oxr.Resource.GetClaimReference().Name+"-log-group") + retentionInDays, _ := getValue(oxr, "spec.resourceConfig.logRetentionInDays", 7) + + logGroupDesired.Resource.Object = map[string]interface{}{ + "apiVersion": "cloudwatchlogs.aws.upbound.io/v1beta1", + "kind": "Group", + "metadata": map[string]interface{}{ + "name": logGroupName, + }, + "spec": map[string]interface{}{ + "deletionPolicy": "Delete", // "Orphan", + "forProvider": map[string]interface{}{ + "region": region, + "retentionInDays": retentionInDays, + }, + "providerConfigRef": map[string]interface{}{ + "name": "provider-aws", + }, + }, + } + return nil // No error == Success +} + +func GenerateLogStream(rsp *fnv1.RunFunctionResponse, desired map[resource.Name]*resource.DesiredComposed, observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error { + logStreamDesired := resource.NewDesiredComposed() + desired[LOG_STREAM_RESOURCE_NAME] = logStreamDesired + + // Fetch optional values from oxr.spec.resourceConfig + region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") + logStreamName, _ := getValue(oxr, "spec.resourceConfig.logGroupName", oxr.Resource.GetClaimReference().Name+"-log-stream") + + logStreamDesired.Resource.Object = map[string]interface{}{ + 
"apiVersion": "cloudwatchlogs.aws.upbound.io/v1beta1", + "kind": "Stream", + "metadata": map[string]interface{}{ + "name": logStreamName, + }, + "spec": map[string]interface{}{ + "deletionPolicy": "Delete", // "Orphan", + "forProvider": map[string]interface{}{ + "region": region, + "name": logStreamName, + "logGroupNameSelector": map[string]interface{}{ + "matchControllerRef": true, + }, + }, + "providerConfigRef": map[string]interface{}{ + "name": "provider-aws", + }, + }, + } + return nil // No error == Success +} + +func GenerateRole(rsp *fnv1.RunFunctionResponse, envConfig *unstructured.Unstructured, desired map[resource.Name]*resource.DesiredComposed, + observed map[resource.Name]resource.ObservedComposed, oxr *resource.Composite, log logging.Logger) error { + roleDesired := resource.NewDesiredComposed() + desired[ROLE_RESOURCE_NAME] = roleDesired + + roleName := oxr.Resource.GetClaimReference().Name + "-role" + + // Fetch optional values from oxr.spec.resourceConfig + region, _ := getValue(oxr, "spec.resourceConfig.region", "us-east-2") + logGroupName, _ := getValue(oxr, "spec.resourceConfig.logGroupName", oxr.Resource.GetClaimReference().Name+"-log-group") + additionalManagedPolicyArns, err := getArrayValue(oxr, "spec.resourceConfig.additionalPermissions.managedPolicyArns", []interface{}{}) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.additionalPermissions.managedPolicyArns from %T", oxr)) + return err + } + additionalInlinePolicies, err := getArrayValue(oxr, "spec.resourceConfig.additionalPermissions.inlinePolicies", []interface{}{}) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get spec.resourceConfig.additionalPermissions.inlinePolicies from %T", oxr)) + return err + } + + assumeRolPolicy := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "kinesisanalytics.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + ` + + managedPolicyArnCount := 2 + len(additionalManagedPolicyArns) + + managedPolicyArns := make([]interface{}, managedPolicyArnCount) + managedPolicyArns[0] = "arn:aws:iam::aws:policy/AmazonS3FullAccess" + managedPolicyArns[1] = "arn:aws:iam::aws:policy/CloudWatchFullAccess" + for i, v := range additionalManagedPolicyArns { + managedPolicyArns[i+2] = v + } + + inlinePolicyCount := 2 + len(additionalInlinePolicies) + inlinePolicy := make([]map[string]interface{}, inlinePolicyCount) + + awsAccountID, ok := envConfig.Object["awsAccountID"] + if !ok { + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get awsAccountID from envConfig %T", envConfig)) + return err + } + } + + logGroupArn := fmt.Sprintf("arn:aws:logs:%s:%s:log-group:%s", region, awsAccountID, logGroupName) + + inlinePolicy[0] = map[string]interface{}{ + "name": "logs_policy", + "policy": fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": [ "%s" ], + "Action": [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + } + ] + }`, logGroupArn), + } + inlinePolicy[1] = map[string]interface{}{ + "name": "metrics_policy", + "policy": `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Resource": "*", + "Action": [ + "cloudwatch:PutMetricData" + ] + } + ] + }`, + } + for i, v := range additionalInlinePolicies { + m, ok := v.(map[string]interface{}) + if ok { + inlinePolicy[i+2] = m + } else { + message := fmt.Sprintf("Entry at 
spec.resourceConfig.additionalPermissions.inlinePolicies[%d] is not a map", i) + response.Fatal(rsp, errors.New(message)) + return errors.New(message) + } + } + + roleDesired.Resource.Object = map[string]interface{}{ + "apiVersion": "iam.aws.upbound.io/v1beta1", + "kind": "Role", + "metadata": map[string]interface{}{ + "name": roleName, + }, + "spec": map[string]interface{}{ + "deletionPolicy": "Delete", // "Orphan", + "forProvider": map[string]interface{}{ + "assumeRolePolicy": assumeRolePolicy, + "managedPolicyArns": managedPolicyArns, + "inlinePolicy": arrayWithMaps(inlinePolicy), + }, + "providerConfigRef": map[string]interface{}{ + "name": "provider-aws", + }, + }, + } + return nil // No error == Success +} + +// RunFunction runs the Function. +func (f *Function) RunFunction(_ context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := response.To(req, response.DefaultTTL) + + /* + reqYaml, err := yaml.Marshal(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot marshal req to YAML %T", req)) + } + f.log.Info("Request", "YAML", string(reqYaml)) + */ + + oxr, err := request.GetObservedCompositeResource(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "Cannot get observed XR from %T", req)) + return rsp, nil + } + + metadataName, _ := getValue(oxr, "metadata.name", "nil") + f.log.Info("Running function", "metadata.name", metadataName) + + desired, err := request.GetDesiredComposedResources(req) + if err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot get desired composed resources in %T", req)) + return rsp, nil + } + + RenderManagedFlinkResources(req, rsp, oxr, f.log) + + if err := response.SetDesiredComposedResources(rsp, desired); err != nil { + response.Fatal(rsp, errors.Wrapf(err, "cannot set desired composed resources in %T", rsp)) + return rsp, err + } + response.Normalf(rsp, "Normal response for metadata.name=%s", metadataName) + f.log.Info("Normal response", "metadata.name", metadataName) + + // You can set a custom status condition on the claim. This allows you to + // communicate with the user. See the link below for status condition + // guidance. + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + response.ConditionTrue(rsp, "FunctionSuccess", "Success"). 
+ TargetCompositeAndClaim() + + return rsp, nil +} diff --git a/aws-crossplane/xfn/fn_test.go b/aws-crossplane/xfn/fn_test.go new file mode 100644 index 0000000..ff3d3db --- /dev/null +++ b/aws-crossplane/xfn/fn_test.go @@ -0,0 +1,130 @@ +package main + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + // "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/crossplane/crossplane-runtime/pkg/logging" + fnv1 "github.com/crossplane/function-sdk-go/proto/v1" + "github.com/crossplane/function-sdk-go/resource" + "github.com/crossplane/function-sdk-go/response" +) + +func TestRunFunction(t *testing.T) { + + type args struct { + ctx context.Context + req *fnv1.RunFunctionRequest + } + type want struct { + rsp *fnv1.RunFunctionResponse + err error + } + + cases := map[string]struct { + reason string + args args + want want + }{ + "ResponseIsReturned": { + reason: "The Function should return a fatal result if no input was specified", + args: args{ + req: &fnv1.RunFunctionRequest{ + Meta: &fnv1.RequestMeta{Tag: "hello"}, + Input: resource.MustStructJSON(`{ + "apiVersion": "template.fn.crossplane.io/v1beta1", + "kind": "Input" + }`), + Observed: &fnv1.State{ + Composite: &fnv1.Resource{ + Resource: resource.MustStructJSON(`{ + "apiVersion": "example.com/v1alpha1", + "kind": "XManagedFlink", + "metadata": { + "name": "flink-demo", + "namespace": "default" + }, + "spec": { + "resourceConfig": { + "region": "us-east-2", + "account": "000000000000", + "name": "flink-test", + "codeBucket": "flink-test-bucket", + "codeFile": "flink-test-app.jar", + "additionalPermissions": { + "managedPolicyArns": [ + "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" + ], + "inlinePolicies": [ + { + "name": "kinesis_policy", + "policy": " {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Resource\": [\n \"arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-ingress\",\n \"arn:aws:kinesis:us-east-2:516535517513:stream/flink-cp-demo-egress\"\n ],\n \"Action\": [\n \"kinesis:DescribeStream\",\n \"kinesis:GetRecords\",\n \"kinesis:GetShardIterator\",\n \"kinesis:ListShards\",\n \"kinesis:PutRecord\"\n ]\n }\n ]\n }\n" + } + ] + }, + "environmentProperties": [{ + "propertyGroup": [{ + "propertyGroupId": "StatefunApplicationProperties", + "propertyMap": { + "EVENTS_INGRESS_STREAM_DEFAULT": "flink-test-ingress", + "EVENTS_EGRESS_STREAM_DEFAULT": "flink-demo-egress" + } + }] + }] + }, + "claimRef": { + "apiVersion": "example.com/v1alpha1", + "kind": "ManagedFlink", + "name": "flink-test", + "namespace": "default" + } + } + }`), + }, + }, + }, + }, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hello", Ttl: durationpb.New(response.DefaultTTL)}, + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Message: "response.Normal(rsp)", + Target: fnv1.Target_TARGET_COMPOSITE.Enum(), + }, + }, + Conditions: []*fnv1.Condition{ + { + Type: "FunctionSuccess", + Status: fnv1.Status_STATUS_CONDITION_TRUE, + Reason: "Success", + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + f := &Function{log: logging.NewNopLogger()} + _ /*rsp,*/, err := f.RunFunction(tc.args.ctx, tc.args.req) + + // if diff := cmp.Diff(tc.want.rsp, rsp, protocmp.Transform()); diff != "" { + // t.Errorf("%s\nf.RunFunction(...): -want rsp, +got 
rsp:\n%s", tc.reason, diff) + // } + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("%s\nf.RunFunction(...): -want err, +got err:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/aws-crossplane/xfn/go.mod b/aws-crossplane/xfn/go.mod new file mode 100644 index 0000000..618d4d6 --- /dev/null +++ b/aws-crossplane/xfn/go.mod @@ -0,0 +1,76 @@ +module github.com/crossplane/function-managed-flink + +go 1.23 + +toolchain go1.23.2 + +require ( + github.com/alecthomas/kong v0.9.0 + github.com/crossplane/crossplane-runtime v1.18.0 + github.com/crossplane/function-sdk-go v0.4.0 + github.com/google/go-cmp v0.6.0 + google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689 + k8s.io/apimachinery v0.31.0 + sigs.k8s.io/controller-tools v0.16.0 +) + +require ( + dario.cat/mergo v1.0.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fatih/color v1.17.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gobuffalo/flect v1.0.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.25.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/client-go v0.31.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect + sigs.k8s.io/controller-runtime v0.19.0 // indirect + sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/aws-crossplane/xfn/go.sum b/aws-crossplane/xfn/go.sum new file mode 100644 index 0000000..f2de160 --- /dev/null +++ b/aws-crossplane/xfn/go.sum @@ -0,0 +1,311 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= +github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= +github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/antchfx/htmlquery v1.2.4 h1:qLteofCMe/KGovBI6SQgmou2QNyedFUW+pE+BpeZ494= +github.com/antchfx/htmlquery v1.2.4/go.mod h1:2xO6iu3EVWs7R2JYqBbp8YzG50gj/ofqs5/0VZoDZLc= +github.com/antchfx/xpath v1.2.0 h1:mbwv7co+x0RwgeGAOHdrKy89GvHaGvxxBtPK0uF9Zr8= +github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossplane/crossplane-runtime v1.18.0 h1:aAQIMNOgPbbXaqj9CUSv+gPl3QnVbn33YlzSe145//0= +github.com/crossplane/crossplane-runtime v1.18.0/go.mod h1:p7nVVsLn0CWjsLvLCtr7T40ErbTgNWKRxmYnwFdfXb4= +github.com/crossplane/function-sdk-go v0.4.0 h1:1jd+UIaZlVNQCUO4hLAgUqWBRnUKw2ObF9ZuMw5CpKk= +github.com/crossplane/function-sdk-go v0.4.0/go.mod h1:jLnzUG8pt8tn/U6/uvtNStAhDjhIq4wCR31yECT54NM= +github.com/crossplane/upjet v1.4.1-0.20240911184956-3afbb7796d46 h1:2IH1YPTBrNmBj0Z1OCjEBTrQCuRaLutZbWLaswFeCFQ= +github.com/crossplane/upjet v1.4.1-0.20240911184956-3afbb7796d46/go.mod h1:wkdZf/Cvhr6PI30VdHIOjg4dX39Z5uijqnLWFk5PbGM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 
h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1 h1:xcuWappghOVI8iNWoF2OKahVejd1LSVi/v4JED44Amo= +github.com/go-json-experiment/json v0.0.0-20240815175050-ebd3a8989ca1/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod 
h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637 h1:Ud/6/AdmJ1R7ibdS0Wo5MWPj0T1R0fkpaD087bBaW8I= +github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= +github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= 
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tmccombs/hcl2json v0.3.3 h1:+DLNYqpWE0CsOQiEZu+OZm5ZBImake3wtITYxQ8uLFQ= +github.com/tmccombs/hcl2json v0.3.3/go.mod h1:Y2chtz2x9bAeRTvSibVRVgbLJhLJXKlUeIvjeVdnm4w= +github.com/upbound/provider-aws v1.14.0 h1:DDUdlMp+dNlFXXlhsGdCvQD7qFdT1AsEcaqlRU3BO14= +github.com/upbound/provider-aws v1.14.0/go.mod h1:IvyvgGlhRVr737E4P75tyD/i53hxnyO7KPM8bbXH+SU= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 
h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689 h1:hNwajDgT0MlsxZzlUajZVmUYFpts8/CYe4BSNx503ZE= +google.golang.org/protobuf v1.34.3-0.20240816073751-94ecbc261689/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= +k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-tools v0.16.0 
h1:EJPB+a5Bve861SPBPPWRbP6bbKyNxqK12oYT5zEns9s=
+sigs.k8s.io/controller-tools v0.16.0/go.mod h1:0I0xqjR65YTfoO12iR+mZR6s6UAVcUARgXRlsu0ljB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/aws-crossplane/xfn/init.sh b/aws-crossplane/xfn/init.sh
new file mode 100755
index 0000000..523fbb5
--- /dev/null
+++ b/aws-crossplane/xfn/init.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# This script helps initialize a new function project by
+# replacing all instances of function-template-go with the
+# name of your function. The script accepts two arguments:
+# 1. The name of your function
+# 2. The path to your function directory
+
+set -e
+
+cd "$2" || return
+
+# Replace function-template-go with the name of your function
+# in go.mod
+perl -pi -e s,function-template-go,"$1",g go.mod
+# in fn.go
+perl -pi -e s,function-template-go,"$1",g fn.go
+# in examples
+perl -pi -e s,function-template-go,"$1",g example/*
+
+echo "Function $1 has been initialised successfully"
diff --git a/aws-crossplane/xfn/input/generate.go b/aws-crossplane/xfn/input/generate.go
new file mode 100644
index 0000000..551821d
--- /dev/null
+++ b/aws-crossplane/xfn/input/generate.go
@@ -0,0 +1,15 @@
+//go:build generate
+// +build generate
+
+// NOTE(negz): See the below link for details on what is happening here.
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+
+// Remove existing and generate new input manifests
+//go:generate rm -rf ../package/input/
+//go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen paths=./v1beta1 object crd:crdVersions=v1 output:artifacts:config=../package/input
+
+package input
+
+import (
+	_ "sigs.k8s.io/controller-tools/cmd/controller-gen" //nolint:typecheck
+)
diff --git a/aws-crossplane/xfn/input/v1beta1/input.go b/aws-crossplane/xfn/input/v1beta1/input.go
new file mode 100644
index 0000000..c506bfc
--- /dev/null
+++ b/aws-crossplane/xfn/input/v1beta1/input.go
@@ -0,0 +1,24 @@
+// Package v1beta1 contains the input type for this Function
+// +kubebuilder:object:generate=true
+// +groupName=template.fn.crossplane.io
+// +versionName=v1beta1
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// This isn't a custom resource, in the sense that we never install its CRD.
+// It is a KRM-like object, so we generate a CRD to describe its schema.
+
+// TODO: Add your input type here! It doesn't need to be called 'Input', you can
+// rename it to anything you like.
+
+// Input can be used to provide input to this Function.
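+// For example, a Composition pipeline step could reference it like this
+// (a hypothetical sketch; the step name is illustrative, and only the
+// apiVersion and kind are meaningful while the type carries no fields):
+//
+//   - step: render-managed-flink
+//     functionRef:
+//       name: function-managed-flink
+//     input:
+//       apiVersion: template.fn.crossplane.io/v1beta1
+//       kind: Input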
+// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=crossplane +type Input struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` +} diff --git a/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go b/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..3b7e60f --- /dev/null +++ b/aws-crossplane/xfn/input/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Input) DeepCopyInto(out *Input) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input. +func (in *Input) DeepCopy() *Input { + if in == nil { + return nil + } + out := new(Input) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Input) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/aws-crossplane/xfn/main.go b/aws-crossplane/xfn/main.go new file mode 100644 index 0000000..31a3acb --- /dev/null +++ b/aws-crossplane/xfn/main.go @@ -0,0 +1,38 @@ +// Package main implements a Composition Function. +package main + +import ( + "github.com/alecthomas/kong" + + "github.com/crossplane/function-sdk-go" +) + +// CLI of this Function. +type CLI struct { + Debug bool `short:"d" help:"Emit debug logs in addition to info logs."` + + Network string `help:"Network on which to listen for gRPC connections." default:"tcp"` + Address string `help:"Address at which to listen for gRPC connections." default:":9443"` + TLSCertsDir string `help:"Directory containing server certs (tls.key, tls.crt) and the CA used to verify client certificates (ca.crt)" env:"TLS_SERVER_CERTS_DIR"` + Insecure bool `help:"Run without mTLS credentials. If you supply this flag --tls-server-certs-dir will be ignored."` + MaxRecvMessageSize int `help:"Maximum size of received messages in MB." default:"4"` +} + +// Run this Function. 
+func (c *CLI) Run() error { + log, err := function.NewLogger(c.Debug) + if err != nil { + return err + } + + return function.Serve(&Function{log: log}, + function.Listen(c.Network, c.Address), + function.MTLSCertificates(c.TLSCertsDir), + function.Insecure(c.Insecure), + function.MaxRecvMessageSize(c.MaxRecvMessageSize*1024*1024)) +} + +func main() { + ctx := kong.Parse(&CLI{}, kong.Description("A Crossplane Composition Function.")) + ctx.FatalIfErrorf(ctx.Run()) +} diff --git a/aws-crossplane/xfn/package/crossplane.yaml b/aws-crossplane/xfn/package/crossplane.yaml new file mode 100644 index 0000000..efa9e69 --- /dev/null +++ b/aws-crossplane/xfn/package/crossplane.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: meta.pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-managed-flink +spec: {} diff --git a/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml b/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml new file mode 100644 index 0000000..c7299ad --- /dev/null +++ b/aws-crossplane/xfn/package/input/template.fn.crossplane.io_inputs.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: inputs.template.fn.crossplane.io +spec: + group: template.fn.crossplane.io + names: + categories: + - crossplane + kind: Input + listKind: InputList + plural: inputs + singular: input + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: Input can be used to provide input to this Function. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + type: object + served: true + storage: true diff --git a/aws-crossplane/xfn/renovate.json b/aws-crossplane/xfn/renovate.json new file mode 100644 index 0000000..21e99bb --- /dev/null +++ b/aws-crossplane/xfn/renovate.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "crossplane": { + "fileMatch": ["(^|/)example/.*\\.ya?ml$"] + }, + "packageRules": [ + { + "matchManagers": ["crossplane"], + "matchFileNames": ["example/**"], + "groupName": "examples" + } + ], + "postUpdateOptions": [ + "gomodTidy", + "gomodUpdateImportPaths" + ] +} diff --git a/aws-terraform/demo-send-events.sh b/aws-terraform/demo-send-events.sh new file mode 100755 index 0000000..d9da9c6 --- /dev/null +++ b/aws-terraform/demo-send-events.sh @@ -0,0 +1,21 @@ +#! 
/bin/bash
+
+set -e
+# GNU base64 wraps its output at 76 columns, which would corrupt the put-record
+# command assembled below, so pass -w 0 on Linux; macOS base64 never wraps.
+if [ "$(uname)" = "Darwin" ]; then
+  MD5SUM=md5
+  BASE64="base64"
+else
+  MD5SUM=md5sum
+  BASE64="base64 -w 0"
+fi
+
+stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-tf-demo-ingress)
+if [ -z "$stream_name" ]; then
+  echo "Stream not found"
+  exit 1
+fi
+grep -v test.action ../src/test/resources/product-cart-integration-test-events.jsonl | while read -r line; do
+  partkey=$(echo "$line" | $MD5SUM | awk '{print $1}')
+  data=$(echo "$line" | $BASE64)
+  cmd="aws kinesis put-record --stream-name $stream_name --partition-key $partkey --data $data"
+  echo $cmd
+  eval $cmd
+done
diff --git a/aws-terraform/demo-tail-egress.sh b/aws-terraform/demo-tail-egress.sh
new file mode 100755
index 0000000..7749263
--- /dev/null
+++ b/aws-terraform/demo-tail-egress.sh
@@ -0,0 +1,24 @@
+#! /bin/bash
+
+set -e
+
+# Get the events sent to the egress stream
+stream_name=$(aws kinesis list-streams | jq -crM .StreamNames[] | grep flink-tf-demo-egress)
+
+get_records_response=$(mktemp)
+
+shard_id=$(aws kinesis list-shards --stream-name $stream_name | jq -crM .Shards[0].ShardId)
+shard_iterator=$(aws kinesis get-shard-iterator --shard-id $shard_id --shard-iterator-type TRIM_HORIZON --stream-name $stream_name | jq -crM .ShardIterator)
+while [ "true" ]; do
+  aws kinesis get-records --shard-iterator $shard_iterator >$get_records_response
+  shard_iterator=$(cat $get_records_response | jq -crM .NextShardIterator)
+  record_count=0
+  for encoded_data in $(cat $get_records_response | jq -crM .Records[].Data); do
+    record_count=$(expr $record_count + 1)
+    echo $encoded_data | base64 -d | jq .
+  done
+  if [ $record_count -eq 0 ]; then
+    sleep 2
+  fi
+done
+
diff --git a/aws-terraform/demo-tail-logs.sh b/aws-terraform/demo-tail-logs.sh
new file mode 100755
index 0000000..bc24d7b
--- /dev/null
+++ b/aws-terraform/demo-tail-logs.sh
@@ -0,0 +1,40 @@
+#! /bin/bash
+
+set -e
+
+cd $(dirname $0)
+
+NEXT_TOKEN_ARG=
+
+CWLOGS_DIR=.cwlogs
+mkdir -p $CWLOGS_DIR
+
+ITERATION=1
+
+if [ -f $CWLOGS_DIR/next.token ]; then
+  NEXT_TOKEN_ARG="--next-token $(cat $CWLOGS_DIR/next.token)"
+fi
+
+while true; do
+  CWLOG_FILE=$CWLOGS_DIR/$(printf "%010d" $ITERATION).json
+  aws logs get-log-events \
+    --start-from-head \
+    $NEXT_TOKEN_ARG \
+    --log-group-name flink-tf-demo-log-group \
+    --log-stream-name flink-tf-demo-log-stream \
+    >$CWLOG_FILE
+
+  NEXT_TOKEN=$(cat $CWLOG_FILE | jq -crM .nextForwardToken)
+  echo $NEXT_TOKEN >$CWLOGS_DIR/next.token
+  NEXT_TOKEN_ARG="--next-token $NEXT_TOKEN"
+  EVENT_COUNT=$(cat $CWLOG_FILE | jq -crM '.events | length')
+
+  if [[ $EVENT_COUNT == 0 ]]; then
+    sleep 2
+    rm $CWLOG_FILE
+  else
+    cat $CWLOG_FILE | jq -crM '.events[] | [.timestamp,(.message | fromjson | [.messageType,.logger,.message] | join(" "))] | join(" ")' | tee -a $CWLOGS_DIR/formatted.log
+  fi
+
+  ITERATION=$(echo "1 + $ITERATION" | bc)
+done
diff --git a/aws-terraform/main.tf b/aws-terraform/main.tf
new file mode 100644
index 0000000..a8dcff9
--- /dev/null
+++ b/aws-terraform/main.tf
@@ -0,0 +1,246 @@
+provider "aws" {
+  region = "us-east-2"
+}
+
+# Caller identity allows referencing the account ID w/o having to hard-code it in the bucket name
+data "aws_caller_identity" "current" {}
+# Same for aws_region so we don't have to hard-code the Flink environment variables.
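+# Both data sources are read-only lookups: data.aws_region.current.name feeds the
+# AWS_REGION environment property passed to the Flink application further below.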
+data "aws_region" "current" {} + +resource "aws_s3_bucket" "flink_demo_bucket" { + # Bucket names must be globally unique, so I'm appending the account ID to workaround BucketAlreadyExists + bucket = "flink-tf-demo-bucket-${data.aws_caller_identity.current.account_id}" +} + +resource "aws_s3_bucket_ownership_controls" "flink_demo_bucket_ownership_controls" { + bucket = aws_s3_bucket.flink_demo_bucket.id + rule { + object_ownership = "BucketOwnerPreferred" + } +} + +resource "aws_s3_bucket_acl" "flink_demo_bucket_acl" { + depends_on = [aws_s3_bucket_ownership_controls.flink_demo_bucket_ownership_controls] + + bucket = aws_s3_bucket.flink_demo_bucket.id + acl = "private" +} + +resource "aws_kinesis_stream" "flink_demo_ingress" { + name = "flink-tf-demo-ingress" + shard_count = 1 + retention_period = 24 # Retention period in hours + + shard_level_metrics = [ + "IncomingBytes", + "OutgoingBytes", + ] + + stream_mode_details { + stream_mode = "PROVISIONED" + } +} + +resource "aws_kinesis_stream" "flink_demo_egress" { + name = "flink-tf-demo-egress" + shard_count = 1 + retention_period = 24 # Retention period in hours + + shard_level_metrics = [ + "IncomingBytes", + "OutgoingBytes", + ] + + stream_mode_details { + stream_mode = "PROVISIONED" + } +} + + +resource "aws_iam_role" "flink_application_role" { + name = "flink-application-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "kinesisanalytics.amazonaws.com" + } + }] + }) +} + +resource "aws_iam_role_policy_attachment" "kinisis_full_access" { + role = aws_iam_role.flink_application_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonKinesisFullAccess" +} + +resource "aws_iam_role_policy_attachment" "s3_full_access" { + role = aws_iam_role.flink_application_role.name + policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" +} + +resource "aws_iam_role_policy_attachment" "cloudwatch_full_access" { + role = aws_iam_role.flink_application_role.name + policy_arn = "arn:aws:iam::aws:policy/CloudWatchFullAccess" +} + +resource "aws_iam_role_policy" "flink_app_s3_policy" { + name = "flink-app-s3-policy" + role = aws_iam_role.flink_application_role.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = [ + "s3:GetObject", + "s3:ListBucket" + ] + Effect = "Allow" + Resource = [ + aws_s3_bucket.flink_demo_bucket.arn, + "${aws_s3_bucket.flink_demo_bucket.arn}/*" + ] + }] + }) +} + +resource "aws_iam_role_policy" "flink_app_kinesis_policy" { + name = "flink-app-kinesis-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = [ + "${aws_kinesis_stream.flink_demo_ingress.arn}", + "${aws_kinesis_stream.flink_demo_egress.arn}" + ] + Action = [ + "kinesis:DescribeStream", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards" + ] + }] + }) +} + +resource "aws_iam_role_policy" "flink_app_logs_policy" { + name = "flink-app-logs-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = [ + "${aws_cloudwatch_log_group.flink_demo_log_group.arn}" + ] + Action = [ + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + }] + }) +} + +resource "aws_cloudwatch_log_group" "flink_demo_log_group" { + name = "flink-tf-demo-log-group" + retention_in_days = 14 +} + +resource 
"aws_cloudwatch_log_stream" "flink_demo_log_stream" { + name = "flink-tf-demo-log-stream" + log_group_name = aws_cloudwatch_log_group.flink_demo_log_group.name +} + +resource "aws_iam_role_policy" "flink_app_metrics_policy" { + name = "flink-app-metrics-policy" + role = aws_iam_role.flink_application_role.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Resource = "*" + Action = [ + "cloudwatch:PutMetricData" + ] + }] + }) +} + +# Reference: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesisanalyticsv2_application +resource "aws_kinesisanalyticsv2_application" "flink_demo_tf" { + name = "flink-tf-demo-application" + runtime_environment = "FLINK-1_18" + service_execution_role = aws_iam_role.flink_application_role.arn + application_mode = "STREAMING" + start_application = true + + application_configuration { + application_code_configuration { + code_content { + s3_content_location { + bucket_arn = aws_s3_bucket.flink_demo_bucket.arn + file_key = "my-stateful-functions-embedded-java-3.3.0.jar" + } + } + code_content_type = "ZIPFILE" + } + + application_snapshot_configuration { + snapshots_enabled = true + } + + environment_properties { + property_group { + property_group_id = "StatefunApplicationProperties" + + property_map = { + EVENTS_INGRESS_STREAM_DEFAULT = "${aws_kinesis_stream.flink_demo_ingress.name}" + EVENTS_EGRESS_STREAM_DEFAULT = "${aws_kinesis_stream.flink_demo_egress.name}" + AWS_REGION = data.aws_region.current.name + } + } + } + + flink_application_configuration { + checkpoint_configuration { + configuration_type = "CUSTOM" + checkpoint_interval = 60000 # Every minute # Increase this to 300000 in production (every 5 minutes) + checkpointing_enabled = true + } + monitoring_configuration { + configuration_type = "CUSTOM" + log_level = "INFO" + metrics_level = "TASK" + } + parallelism_configuration { + auto_scaling_enabled = false + configuration_type = "CUSTOM" + parallelism = 1 + parallelism_per_kpu = 1 + } + } + + run_configuration { + application_restore_configuration { + application_restore_type = "RESTORE_FROM_LATEST_SNAPSHOT" + # snapshot_name = "xyz" + } + flink_run_configuration { + allow_non_restored_state = false + } + } + } + cloudwatch_logging_options { + log_stream_arn = aws_cloudwatch_log_stream.flink_demo_log_stream.arn + } + + tags = { + ProvisionedBy = "Terraform" + } +} diff --git a/docker-compose.yml b/docker-compose.yml index 325ef6d..67375cc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,7 @@ services: localstack: image: localstack/localstack:3.0.2 + platform: linux/amd64 profiles: [kinesis,statefun,all] ports: - "4566:4566" @@ -18,6 +19,7 @@ services: create-streams: image: amazon/aws-cli + platform: linux/amd64 profiles: [kinesis,statefun,all] depends_on: - localstack @@ -41,6 +43,7 @@ services: " jobmanager: + platform: linux/amd64 profiles: [statefun,all] depends_on: - create-streams @@ -54,6 +57,7 @@ services: -D "state.savepoints.dir=file:///savepoints" -D "state.checkpoints.dir=file:///checkpoints" --job-classname org.apache.flink.statefun.flink.core.StatefulFunctionsJob +# --fromSavepoint=file:///savepoints/savepoint-xxxx-yyyyyy entrypoint: /entrypoint.sh expose: - "6123" @@ -73,6 +77,7 @@ services: - ./docker-mounts/savepoints:/savepoints taskmanager: + platform: linux/amd64 profiles: [statefun,all] depends_on: - jobmanager @@ -104,6 +109,7 @@ services: send-events: image: amazon/aws-cli + platform: linux/amd64 profiles: [send-events,all] volumes: - 
/var/run/docker.sock:/var/run/docker.sock @@ -126,6 +132,7 @@ services: get-egress-events: image: amazon/aws-cli + platform: linux/amd64 profiles: [get-egress-events,all] volumes: - /var/run/docker.sock:/var/run/docker.sock diff --git a/entrypoint.sh b/entrypoint.sh index 9e9fab3..66c0690 100644 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -2,7 +2,7 @@ set -e -export ENABLE_BUILT_IN_PLUGINS=flink-s3-fs-hadoop-1.16.2.jar +export ENABLE_BUILT_IN_PLUGINS=flink-s3-fs-hadoop-1.18.1.jar # fix for rocksb memory fragmentation issue export LD_PRELOAD=$LD_PRELOAD:/usr/lib/x86_64-linux-gnu/libjemalloc.so diff --git a/pom.xml b/pom.xml index 952d300..5d820a5 100644 --- a/pom.xml +++ b/pom.xml @@ -15,13 +15,15 @@ UTF-8 - 3.3.0 - 1.16.2 + 3.3-1.18 + 1.18.1 + 3.7.1 11 ${java.version} ${java.version} true 2.20.162 + 1.2.0 @@ -49,27 +51,6 @@ apache-client - - software.amazon.awssdk - regions - - - - software.amazon.awssdk - auth - - - - software.amazon.awssdk - opensearch - - - - - software.amazon.awssdk - sts - @@ -85,12 +66,41 @@ - + org.apache.flink - statefun-sdk-embedded + statefun-flink-distribution ${statefun.version} - provided + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + + org.apache.flink + flink-connector-kafka + + + org.apache.flink + statefun-kafka-io + + + + + + org.apache.flink + statefun-flink-core + ${statefun.version} + + + com.amazonaws + aws-kinesisanalytics-runtime + ${kda.runtime.version} @@ -109,15 +119,9 @@ com.google.protobuf protobuf-java - 3.16.3 + ${protobuf.version} - - - org.apache.flink - statefun-flink-datastream - ${statefun.version} - @@ -151,20 +155,6 @@ 3.11 - - - org.apache.flink - flink-state-processor-api - ${flink.version} - - - - org.apache.flink - statefun-flink-distribution - ${statefun.version} - - - org.apache.flink @@ -173,12 +163,6 @@ test - - org.apache.flink - statefun-flink-state-processor - ${statefun.version} - - org.springframework.boot spring-boot-starter-test @@ -255,7 +239,7 @@ protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:3.15.8:exe:${os.detected.classifier} + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} @@ -298,7 +282,8 @@ ${project.build.directory}/flink-plugins false true - true + + false @@ -339,7 +324,7 @@ - org.apache.flink.statefun.flink.core.StatefulFunctionsJob + com.example.stateful_functions.Main @@ -409,7 +394,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - com.il.otk.com.google.protobuf:protoc:3.15.8:exe:${os.detected.classifier}-alpine + com.il.otk.com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}-alpine diff --git a/src/main/java/com/example/stateful_functions/Configuration.java b/src/main/java/com/example/stateful_functions/Configuration.java index 3681993..5984abb 100644 --- a/src/main/java/com/example/stateful_functions/Configuration.java +++ b/src/main/java/com/example/stateful_functions/Configuration.java @@ -1,12 +1,14 @@ package com.example.stateful_functions; +import com.amazonaws.services.kinesisanalytics.runtime.KinesisAnalyticsRuntime; import org.apache.flink.statefun.sdk.kinesis.auth.AwsRegion; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.lang.reflect.Field; import java.util.Locale; -import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Properties; public class Configuration { @@ -22,6 +24,7 @@ public class Configuration { public static boolean USE_ENHANCED_FANOUT = properties.getOrDefault("USE_ENHANCED_FANOUT", "true").equals("true"); public static String 
ENHANCED_FANOUT_NAME = properties.getOrDefault("ENHANCED_FANOUT_NAME", "example-enhanced-fanout").toString();
+    public static String APP_VERSION = properties.getOrDefault("app.version", "0.1").toString();
 
     public static final AwsRegion getAwsRegion() {
@@ -49,14 +52,30 @@ public static final AwsRegion getAwsRegion() {
         }
     }
 
-    private static final Properties getProperties() {
+    private static Properties getProperties() {
         // System.getProperties + System.getenv()
-        Properties properties = System.getProperties();
-        Map env = System.getenv();
+        Properties properties = new Properties();
+        try {
+            properties.load(Configuration.class.getResourceAsStream("/application.properties"));
+        } catch (Exception x) {
+            LOG.warn(x.getMessage(), x);
+        }
 
-        properties.putAll(env);
+        properties.putAll(System.getProperties());
+        properties.putAll(System.getenv());
 
+        // If deployed in AWS Managed Flink, then get our config from
+        // KinesisAnalyticsRuntime.getApplicationProperties().get("StatefunApplicationProperties")
+        try {
+            Optional.ofNullable(KinesisAnalyticsRuntime.getApplicationProperties())
+                .map(ap -> ap.get("StatefunApplicationProperties"))
+                .filter(Objects::nonNull)
+                .ifPresent(ap -> properties.putAll(ap));
+        }
+        catch (Exception x) {
+            LOG.warn(x.getMessage(), x);
+        }
         return properties;
     }
diff --git a/src/main/java/com/example/stateful_functions/Main.java b/src/main/java/com/example/stateful_functions/Main.java
new file mode 100644
index 0000000..da6c1ab
--- /dev/null
+++ b/src/main/java/com/example/stateful_functions/Main.java
@@ -0,0 +1,26 @@
+package com.example.stateful_functions;
+
+import org.apache.flink.statefun.flink.core.StatefulFunctionsConfig;
+import org.apache.flink.statefun.flink.core.StatefulFunctionsJob;
+import org.apache.flink.statefun.flink.core.StatefulFunctionsUniverseProvider;
+import org.apache.flink.statefun.flink.core.spi.Modules;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+
+/** Use the main() method here instead of StatefulFunctionsJob.main() as described in the AWS Managed Flink docs */
+public class Main {
+
+    public static void main(String...
args) throws Exception { + StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); + + StatefulFunctionsConfig stateFunConfig = StatefulFunctionsConfig.fromEnvironment(env); + stateFunConfig.setProvider((StatefulFunctionsUniverseProvider) (classLoader, statefulFunctionsConfig) -> { + Modules modules = Modules.loadFromClassPath(stateFunConfig); + return modules.createStatefulFunctionsUniverse(); + }); + + + StatefulFunctionsJob.main(env, stateFunConfig); + } + +} + diff --git a/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java b/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java index 97d3c32..3921a31 100644 --- a/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java +++ b/src/main/java/com/example/stateful_functions/cloudevents/data/CartItemStatusDetails.java @@ -8,6 +8,7 @@ public class CartItemStatusDetails { private int quantity; private BigDecimal currentPrice; private ProductAvailability availability; + private String version; // version of the app from application.properties public CartItemStatusDetails() { } @@ -18,6 +19,7 @@ private CartItemStatusDetails(Builder builder) { quantity = builder.quantity; currentPrice = builder.currentPrice; availability = builder.availability; + version = builder.version; } public String getProductId() { @@ -40,6 +42,9 @@ public ProductAvailability getAvailability() { return availability; } + public String getVersion() { + return version; + } public static final class Builder { private String productId; @@ -47,6 +52,7 @@ public static final class Builder { private int quantity; private BigDecimal currentPrice; private ProductAvailability availability; + private String version; public Builder() { } @@ -76,6 +82,11 @@ public Builder availability(ProductAvailability val) { return this; } + public Builder version(String val) { + version = val; + return this; + } + public CartItemStatusDetails build() { return new CartItemStatusDetails(this); } diff --git a/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java b/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java index eaf9aec..34d425d 100644 --- a/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java +++ b/src/main/java/com/example/stateful_functions/egress/EgressSerializer.java @@ -8,6 +8,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Optional; + import static java.nio.charset.StandardCharsets.UTF_8; @@ -19,8 +21,8 @@ public class EgressSerializer implements KinesisEgressSerializer new CartStateDetails(cartProduct.getCartId())); + CartStateDetails cartState = state.get(); + if (cartState == null) { + LOG.info("Creating state for {}", context.self().id()); + cartState = new CartStateDetails(cartProduct.getCartId()); + } + else { + LOG.info("Updating state for {}", context.self().id()); + } CartItemStateDetails cartItem = cartState.getItems().get(cartProduct.getProductId()); @@ -86,7 +94,6 @@ private void handleCartProductEvent(Context context, CloudEvent event) { final int resultingItemQuantity; if (cartItem == null) { - startingItemQuantity = 0; resultingItemQuantity = cartProduct.getQuantity(); } else { @@ -129,9 +136,13 @@ private void handleCartProductEvent(Context context, CloudEvent event) { private void handleProductEvent(Context context, CloudEvent event) { CartStateDetails cartState = state.get(); if (cartState == null) { + LOG.info("Nonexistent state for {}", 
context.self().id()); // Nothing to do return; } + else { + LOG.info("Updating state for {}", context.self().id()); + } ProductEventDetails productDetails = cloudEventDataAccess.toProductEventDetails(event); CartItemStateDetails cartItem = cartState.getItems().get(productDetails.getId()); @@ -158,6 +169,7 @@ private void egressCartStatus(Context context, CartStateDetails cartState) { .originPrice(itemStateDetails.getOriginPrice()) .currentPrice(itemStateDetails.getPrice()) .availability(ProductAvailability.valueOf(itemStateDetails.getAvailability().name())) + .version(Configuration.APP_VERSION) .build() ); @@ -170,6 +182,7 @@ private void egressCartStatus(Context context, CartStateDetails cartState) { .withTime(OffsetDateTime.now(ZoneOffset.UTC)) .build(); + LOG.info("Publishing cart status event to egress: {}", cloudEventJsonFormat.serialize(cartStatusEvent)); egressEvent(context, cartStatusEvent, cartState.getId()); } } \ No newline at end of file diff --git a/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java b/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java index 8596550..7c0a947 100644 --- a/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java +++ b/src/main/java/com/example/stateful_functions/function/product/ProductStatefulFunction.java @@ -87,6 +87,12 @@ public void handleEvent(Context context, CloudEvent event) { } private void handleProductEvent(Context context, CloudEvent event) { + if (state.get() == null) { + LOG.info("Creating state for {}", context.self().id()); + } + else { + LOG.info("Updating state for {}", context.self().id()); + } state.set(fromProductEventDetails(cloudEventDataAccess.toProductEventDetails(event))); notifySubscribers(context, event); } diff --git a/src/main/proto/envelope.proto b/src/main/proto/envelope.proto index 0a475d8..1ee9f76 100644 --- a/src/main/proto/envelope.proto +++ b/src/main/proto/envelope.proto @@ -7,5 +7,5 @@ option java_outer_classname = "ExampleProtobuf"; message Envelope { string payload = 1; - optional string partitionKey = 2; + string partitionKey = 2; } diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index d52783c..77cc591 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -1,2 +1,3 @@ # Reduce noise, primarily for tests spring.main.banner-mode=off +app.version=1.0
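+# Read at runtime as Configuration.APP_VERSION and stamped into egress cart
+# status events via CartItemStatusDetails.version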