diff --git a/.prettierignore b/.prettierignore
similarity index 100%
rename from .prettierignore
rename to .prettierignore
diff --git a/lib/allora/.gitignore b/lib/allora/.gitignore
new file mode 100644
index 00000000..f60797b6
--- /dev/null
+++ b/lib/allora/.gitignore
@@ -0,0 +1,8 @@
+*.js
+!jest.config.js
+*.d.ts
+node_modules
+
+# CDK asset staging directory
+.cdk.staging
+cdk.out
diff --git a/lib/allora/.npmignore b/lib/allora/.npmignore
new file mode 100644
index 00000000..c1d6d45d
--- /dev/null
+++ b/lib/allora/.npmignore
@@ -0,0 +1,6 @@
+*.ts
+!*.d.ts
+
+# CDK asset staging directory
+.cdk.staging
+cdk.out
diff --git a/lib/allora/README.md b/lib/allora/README.md
new file mode 100644
index 00000000..628ef9a9
--- /dev/null
+++ b/lib/allora/README.md
@@ -0,0 +1,237 @@
+# Sample AWS Blockchain Node Runner app for Allora Worker Nodes
+
+| Contributed by |
+|:--------------------:|
+| [@clementupshot](https://github.com/clementupshot), [@allora-rc](https://github.com/allora-rc), [@Madisonw](https://github.com/Madisonw)|
+
+[Allora](https://www.allora.network/) is a self-improving decentralized Artificial Intelligence (AI) network. The primary goal of the network is to be the marketplace for intelligence. In other words, Allora aims to incentivize data scientists (workers) to provide high-quality inferences as requested by consumers. Inferences include predictions of arbitrary future events or difficult computations requiring specialized knowledge.
+
+The Allora Network brings together:
+
+ - [Consumers](https://docs.allora.network/devs) who pay for and acquire inferences or expertise to be revealed
+ - [Workers](https://v2.docs.allora.network/datasci) who reveal inferences
+ - [Reputers](https://docs.allora.network/nops) who determine how accurate workers are after a ground truth is revealed
+ - [Validators](https://docs.allora.network/nops) who secure protocol state, history, and reward distributions
+
+With these ingredients, the Allora Network is able to continuously learn and improve itself over time producing inferences that are more accurate than the most accurate participant.
+
+Allora Worker nodes are the interfaces between data scientists' models and the Allora Network. A worker node is a machine-intelligent application registered on the Allora chain that provides inference/prediction on a particular topic it's subscribed to and gets rewarded based on the inference quality.
+
+This blueprint is designed to assist in deploying a single Allora [Worker Node](https://v2.docs.allora.network/datasci) on AWS. It is intended for use in development, testing, or Proof of Concept (PoC) environments.
+
+## Overview of Deployment Architecture
+
+### Single Worker Node Setup
+
+![Single Node Deployment Architecture](./doc/assets/Architecture-Single-Allora-Worker-Node.png)
+The AWS Cloud Development Kit (CDK) is used to deploy a single Allora Worker Node. The CDK application deploys the following infrastructure:
+
+ - Virtual Private Cloud (VPC)
+ - Internet Gateway (IGW) to allow inbound requests for inferences from consumers and outbound responses from the worker node revealing inferences
+ - Public subnet that has a direct route to the IGW
+ - Security Group (SG) with TCP Port 9010 open inbound allowing requests for inferences to be routed to the Allora Worker Node
+ - Single Amazon Elastic Compute Cloud (EC2) instance (the Allora Worker Node) assigned to the public subnet
+
+The Allora Worker Node is accessed by the user internally and is not exposed to the Internet to protect the node from unauthorized access. A user can gain access to the EC2 Instance using AWS Session Manager.
+
+Multiple processes run on the Allora Worker Node (EC2 instance):
+
+ - Docker container with the worker node logic that handles communication between the worker and the public head nodes
+ - Docker container running the model server that reveals inferences to consumers
+
+Allora Public Head Nodes publish the Allora chain requests (requests for inferences from consumers) to Allora worker nodes. When a worker node is initialized, it starts with an environment variable called BOOT_NODES, which helps handle the connection and communications between worker nodes and the head nodes.
+
+The worker node (docker container) will call the function that invokes custom logic that handles the actual inference. The request-response is a bidirectional flow from the Allora chain (inference requests from consumers) to the public head nodes to the worker node and finally to the model server that reveals inferences.
+
+## Additional materials
+
+
+
+Well-Architected Checklist
+
+This is the Well-Architected checklist for the Allora worker nodes implementation of the AWS Blockchain Node Runner app. This checklist takes into account questions from the [AWS Well-Architected Framework](https://aws.amazon.com/architecture/well-architected/) which are relevant to this workload. Please feel free to add more checks from the framework if required for your workload.
+
+| Pillar | Control | Question/Check | Remarks |
+|:------------------------|:----------------------------------|:---------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Security | Network protection | Are there unnecessary open ports in security groups? | Please note that port 9010 (TCP) is open inbound to support requests for inferences from the Allora Network public head nodes. |
+| | | Traffic inspection | Traffic protection is not used in the solution. [AWS Web Applications Firewall (WAF)](https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) could be implemented for traffic over HTTP(S), [AWS Shield](https://docs.aws.amazon.com/waf/latest/developerguide/shield-chapter.html) provides Distributed Denial of Service (DDoS) protection. Additional charges will apply. |
+| | Compute protection | Reduce attack surface | This solution uses Amazon Linux AMI. You may choose to run hardening scripts on it. |
+| | | Enable people to perform actions at a distance | This solution uses [AWS Systems Manager for terminal session](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html#start-sys-console). SSH Port 22 is not open inbound. |
+| | Data protection at rest | Use encrypted Amazon Elastic Block Store (Amazon EBS) volumes | This solution uses encrypted Amazon EBS volumes. |
+| | Authorization and access control | Use instance profile with Amazon Elastic Compute Cloud (Amazon EC2) instances | This solution uses AWS Identity and Access Management (AWS IAM) role instead of IAM user. |
+| | | Following principle of least privilege access | Root user is not used (using special user "ec2-user" instead). |
+| | Application security | Security focused development practices | cdk-nag is being used with appropriate suppressions. |
+| Cost optimization | Service selection | Use cost effective resources | We use a T3 instance as T3 instances are a low cost burstable general purpose instance type that provide a baseline level of CPU performance with the ability to burst CPU usage at any time for as long as required. T3 instances are designed for applications with moderate CPU usage that experience temporary spikes in use. This profile aligns closely with the load profile of Allora Network worker nodes. |
+| Reliability | Resiliency implementation | Withstand component failures | This solution does not use an [AWS EC2 Auto Scaling Group](https://aws.amazon.com/ec2/autoscaling/) but one can be implemented. |
+| | Data backup | How is data backed up? | Considering blockchain data is replicated by Allora Cosmos AppChain Validator nodes, we don't use additional mechanisms to backup the data. |
+| | Resource monitoring | How are workload resources monitored? | Resources are not being monitored using Amazon CloudWatch dashboards. Amazon CloudWatch custom metrics are being pushed via CloudWatch Agent. |
+| Performance efficiency | Compute selection | How is compute solution selected? | Compute solution is selected based on best price-performance, i.e. AWS EC2 T3 Medium instance suitable for bursty workloads. |
+| | Storage selection | How is storage solution selected? | Storage solution is selected based on best price-performance, i.e. Amazon EBS volumes with optimal IOPS and throughput. |
+| | Architecture selection | How is the best performance architecture selected? | A combination of recommendations from the Allora Network community and Allora Labs testing. |
+| Sustainability           | Hardware & services               | Select most efficient hardware for your workload                                  | The solution uses AMD-powered instances. There is a potential to use AWS Graviton-based Amazon EC2 instances which offer the best performance per watt of energy use in Amazon EC2. |
+
+
+## Worker Node System Requirements
+
+- Operating System: Any modern Linux operating system
+- CPU: Minimum of 2 cores
+- Memory: Minimum of 4GB
+- Storage: SSD or NVMe with minimum of 20GB of space
+
+## Setup Instructions
+
+### Open AWS CloudShell
+
+To begin, ensure you login to your AWS account with permissions to create and modify resources in IAM, EC2, EBS, VPC, S3, KMS, and Secrets Manager.
+
+From the AWS Management Console, open the [AWS CloudShell](https://docs.aws.amazon.com/cloudshell/latest/userguide/welcome.html), a web-based shell environment. If unfamiliar, review the [2-minute YouTube video](https://youtu.be/fz4rbjRaiQM) for an overview and check out [CloudShell with VPC environment](https://docs.aws.amazon.com/cloudshell/latest/userguide/creating-vpc-environment.html) that we'll use to test nodes API from internal IP address space.
+
+Once ready, you can run the commands to deploy and test blueprints in the CloudShell.
+
+### Clone this repository and install dependencies
+
+```bash
+git clone https://github.com/aws-samples/aws-blockchain-node-runners.git
+cd aws-blockchain-node-runners
+npm install
+```
+
+### Deploy single worker node
+
+1. Make sure you are in the root directory of the cloned repository
+
+2. Configure your setup
+
+ Create your own copy of `.env` file and edit it to update with your AWS Account ID and Region:
+```bash
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+cd lib/allora
+npm install
+pwd
+cp ./sample-configs/.env-sample-full .env
+nano .env
+```
+> NOTE:
+> Example configuration parameters are set in the `sample-configs/.env-sample-full` file copied above. You can find more examples inside the `sample-configs` directory.
+
+> IMPORTANT:
+> All AWS CDK v2 deployments use dedicated AWS resources to hold data during deployment. Therefore, your AWS account and Region must be [bootstrapped](https://docs.aws.amazon.com/cdk/v2/guide/bootstrapping.html) to create these resources before you can deploy. If you haven't already bootstrapped, issue the following command:
+> ```bash
+> cdk bootstrap aws://ACCOUNT-NUMBER/REGION
+> ```
+
+3. Deploy Common Stack
+
+```bash
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+npx cdk deploy allora-common
+```
+
+4. Deploy Allora Worker Node
+
+```bash
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+npx cdk deploy allora-single-node --json --outputs-file single-node-deploy.json
+```
+
+5. Test your node
+
+```bash
+INSTANCE_ID=$(cat single-node-deploy.json | jq -r '..|.nodeinstanceid? | select(. != null)')
+NODE_INTERNAL_IP=$(aws ec2 describe-instances --instance-ids $INSTANCE_ID --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text)
+echo "NODE_INTERNAL_IP=$NODE_INTERNAL_IP"
+TOKEN="ETH"
+
+curl http://$NODE_INTERNAL_IP:8000/inference/$TOKEN
+```
+
+## Clear up and undeploy everything
+
+1. Undeploy worker node and common components
+
+```bash
+# Setting the AWS account id and region in case local .env file is lost
+export AWS_ACCOUNT_ID=
+export AWS_REGION=
+
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+
+# Undeploy Single Node
+npx cdk destroy allora-single-node
+
+# Undeploy Common Stack
+npx cdk destroy allora-common
+```
+
+### FAQ
+
+1. How to check the logs from the EC2 user-data script?
+
+Please enter the [AWS Management Console - EC2 Instances](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#Instances:instanceState=running), choose the correct region, copy the instance ID you need to query.
+
+```bash
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+
+export INSTANCE_ID="i-**************"
+echo "INSTANCE_ID=" $INSTANCE_ID
+aws ssm start-session --target $INSTANCE_ID --region $AWS_REGION
+sudo cat /var/log/cloud-init-output.log
+```
+2. How to check the worker node connectivity to the Allora Network?
+
+Please enter the [AWS Management Console - EC2 Instances](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#Instances:instanceState=running), choose the correct region, copy the instance ID you need to query.
+
+```bash
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+
+export INSTANCE_ID="i-**************"
+echo "INSTANCE_ID=" $INSTANCE_ID
+aws ssm start-session --target $INSTANCE_ID --region $AWS_REGION
+```
+
+You should be able to query Topic 1 on the Allora Network and see output similar to the example below:
+```bash
+$ allorad q emissions topic 1 --node https://allora-rpc.testnet-1.testnet.allora.network
+effective_revenue: "0"
+topic:
+ allow_negative: true
+ alpha_regret: "0.1"
+ creator: allo1lzf3xp0zqg4239mrswd0cclsgt3y8fl7l84hxu
+ default_arg: ETH
+ epoch_last_ended: "183177"
+ epoch_length: "120"
+ ground_truth_lag: "120"
+ id: "1"
+ inference_logic: bafybeifqs2c7ghellof657rygvrh6ht73scto3oznw4i747sqk3ihy7s5m
+ inference_method: allora-inference-function.wasm
+ loss_logic: bafybeid7mmrv5qr4w5un6c64a6kt2y4vce2vylsmfvnjt7z2wodngknway
+ loss_method: loss-calculation-eth.wasm
+ metadata: ETH 10min Prediction
+ p_norm: "3"
+ tolerance: "0.001"
+weight: "0"
+```
+3. How to check the Allora worker containers are running?
+
+Please enter the [AWS Management Console - EC2 Instances](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#Instances:instanceState=running), choose the correct region, copy the instance ID you need to query.
+
+```bash
+pwd
+# Make sure you are in aws-blockchain-node-runners/lib/allora
+
+export INSTANCE_ID="i-**************"
+echo "INSTANCE_ID=" $INSTANCE_ID
+aws ssm start-session --target $INSTANCE_ID --region $AWS_REGION
+```
+
+```bash
+[ec2-user@ip-192-168-0-224 ~]$ docker ps -a
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+b10c12c51f32 worker-worker "allora-node allora-…" 18 hours ago Exited (2) 18 hours ago worker
+05273577ce7a alloranetwork/allora-inference-base-head:latest "allora-node allora-…" 18 hours ago Exited (2) 18 hours ago head
+```
diff --git a/lib/allora/app.ts b/lib/allora/app.ts
new file mode 100644
index 00000000..aba53a38
--- /dev/null
+++ b/lib/allora/app.ts
@@ -0,0 +1,23 @@
+#!/usr/bin/env node
+import 'dotenv/config';
+import 'source-map-support/register';
+import * as cdk from 'aws-cdk-lib';
+import { AlloraCommonStack } from "./lib/common-stack";
+import { AlloraStack } from './lib/single-node-stack';
+import { baseConfig, singleNodeConfig } from './lib/config/node-config';
+
+const app = new cdk.App();
+
+new AlloraCommonStack(app, "allora-common", {
+ stackName: `allora-common`,
+ env: { account: baseConfig.accountId, region: baseConfig.region },
+});
+
+new AlloraStack(app, 'allora-single-node', {
+ stackName: 'allora-single-node',
+ env: {
+ account: baseConfig.accountId,
+ region: baseConfig.region
+ },
+ ...singleNodeConfig
+});
diff --git a/lib/allora/cdk.json b/lib/allora/cdk.json
new file mode 100644
index 00000000..e30ac6ac
--- /dev/null
+++ b/lib/allora/cdk.json
@@ -0,0 +1,72 @@
+{
+ "app": "npx ts-node --prefer-ts-exts app.ts",
+ "watch": {
+ "include": [
+ "**"
+ ],
+ "exclude": [
+ "README.md",
+ "cdk*.json",
+ "**/*.d.ts",
+ "**/*.js",
+ "tsconfig.json",
+ "package*.json",
+ "yarn.lock",
+ "node_modules",
+ "test"
+ ]
+ },
+ "context": {
+ "@aws-cdk/aws-lambda:recognizeLayerVersion": true,
+ "@aws-cdk/core:checkSecretUsage": true,
+ "@aws-cdk/core:target-partitions": [
+ "aws",
+ "aws-cn"
+ ],
+ "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true,
+ "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true,
+ "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true,
+ "@aws-cdk/aws-iam:minimizePolicies": true,
+ "@aws-cdk/core:validateSnapshotRemovalPolicy": true,
+ "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true,
+ "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true,
+ "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true,
+ "@aws-cdk/aws-apigateway:disableCloudWatchRole": true,
+ "@aws-cdk/core:enablePartitionLiterals": true,
+ "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true,
+ "@aws-cdk/aws-iam:standardizedServicePrincipals": true,
+ "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true,
+ "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true,
+ "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true,
+ "@aws-cdk/aws-route53-patters:useCertificate": true,
+ "@aws-cdk/customresources:installLatestAwsSdkDefault": false,
+ "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true,
+ "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true,
+ "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true,
+ "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true,
+ "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true,
+ "@aws-cdk/aws-redshift:columnId": true,
+ "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true,
+ "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true,
+ "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true,
+ "@aws-cdk/aws-kms:aliasNameRef": true,
+ "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true,
+ "@aws-cdk/core:includePrefixInUniqueNameGeneration": true,
+ "@aws-cdk/aws-efs:denyAnonymousAccess": true,
+ "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true,
+ "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true,
+ "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true,
+ "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true,
+ "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true,
+ "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true,
+ "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true,
+ "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true,
+ "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true,
+ "@aws-cdk/aws-codepipeline:defaultPipelineTypeToV2": true,
+ "@aws-cdk/aws-kms:reduceCrossAccountRegionPolicyScope": true,
+ "@aws-cdk/aws-eks:nodegroupNameAttribute": true,
+ "@aws-cdk/aws-ec2:ebsDefaultGp3Volume": true,
+ "@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": true,
+ "@aws-cdk/custom-resources:logApiResponseDataPropertyTrueDefault": false
+ }
+}
diff --git a/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node-Source.drawio b/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node-Source.drawio
new file mode 100644
index 00000000..b80328b0
--- /dev/null
+++ b/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node-Source.drawio
@@ -0,0 +1,122 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node.png b/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node.png
new file mode 100644
index 00000000..197544d8
Binary files /dev/null and b/lib/allora/doc/assets/Architecture-Single-Allora-Worker-Node.png differ
diff --git a/lib/allora/jest.config.js b/lib/allora/jest.config.js
new file mode 100644
index 00000000..08263b89
--- /dev/null
+++ b/lib/allora/jest.config.js
@@ -0,0 +1,8 @@
+module.exports = {
+ testEnvironment: 'node',
+  roots: ['<rootDir>/test'],
+ testMatch: ['**/*.test.ts'],
+ transform: {
+ '^.+\\.tsx?$': 'ts-jest'
+ }
+};
diff --git a/lib/allora/lib/assets/instance/cfn-hup/cfn-auto-reloader.conf b/lib/allora/lib/assets/instance/cfn-hup/cfn-auto-reloader.conf
new file mode 100644
index 00000000..3cd32a0a
--- /dev/null
+++ b/lib/allora/lib/assets/instance/cfn-hup/cfn-auto-reloader.conf
@@ -0,0 +1,4 @@
+[cfn-auto-reloader-hook]
+triggers=post.update
+path=Resources.WebServerHost.Metadata.AWS::CloudFormation::Init
+action=/opt/aws/bin/cfn-init -v --stack __AWS_STACK_NAME__ --resource WebServerHost --region __AWS_REGION__
diff --git a/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.conf b/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.conf
new file mode 100644
index 00000000..2163b37a
--- /dev/null
+++ b/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.conf
@@ -0,0 +1,5 @@
+[main]
+stack=__AWS_STACK_ID__
+region=__AWS_REGION__
+# The interval used to check for changes to the resource metadata in minutes. Default is 15
+interval=2
diff --git a/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.service b/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.service
new file mode 100644
index 00000000..2660ea46
--- /dev/null
+++ b/lib/allora/lib/assets/instance/cfn-hup/cfn-hup.service
@@ -0,0 +1,8 @@
+[Unit]
+Description=cfn-hup daemon
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/cfn-hup
+Restart=always
+[Install]
+WantedBy=multi-user.target
diff --git a/lib/allora/lib/assets/instance/cfn-hup/setup.sh b/lib/allora/lib/assets/instance/cfn-hup/setup.sh
new file mode 100755
index 00000000..418811e4
--- /dev/null
+++ b/lib/allora/lib/assets/instance/cfn-hup/setup.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+if [ -n "$1" ]; then
+ export STACK_ID=$1
+else
+ echo "Error: No Stack ID is provided"
+  echo "Usage: instance/cfn-hup/setup.sh <stack-id> <aws-region>"
+ exit 1
+fi
+
+if [ -n "$2" ]; then
+ export AWS_REGION=$2
+else
+ echo "Error: No AWS Region is provided"
+  echo "Usage: instance/cfn-hup/setup.sh <stack-id> <aws-region>"
+ exit 1
+fi
+
+ echo "Install CloudFormation helper scripts"
+ mkdir -p /opt/aws/
+ pip3 install --break-system-packages https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-py3-latest.tar.gz
+ ln -s /usr/local/init/ubuntu/cfn-hup /etc/init.d/cfn-hup
+
+ echo "Configuring CloudFormation helper scripts"
+ mkdir -p /etc/cfn/
+ mv /opt/instance/cfn-hup/cfn-hup.conf /etc/cfn/cfn-hup.conf
+ sed -i "s;__AWS_STACK_ID__;\"$STACK_ID\";g" /etc/cfn/cfn-hup.conf
+ sed -i "s;__AWS_REGION__;\"$AWS_REGION\";g" /etc/cfn/cfn-hup.conf
+
+ mkdir -p /etc/cfn/hooks.d/system
+ mv /opt/instance/cfn-hup/cfn-auto-reloader.conf /etc/cfn/hooks.d/cfn-auto-reloader.conf
+ sed -i "s;__AWS_STACK_NAME__;\"$STACK_NAME\";g" /etc/cfn/hooks.d/cfn-auto-reloader.conf
+ sed -i "s;__AWS_REGION__;\"$AWS_REGION\";g" /etc/cfn/hooks.d/cfn-auto-reloader.conf
+
+ echo "Starting CloudFormation helper scripts as a service"
+ mv /opt/instance/cfn-hup/cfn-hup.service /etc/systemd/system/cfn-hup.service
+
+ systemctl daemon-reload
+ systemctl enable --now cfn-hup
+ systemctl start cfn-hup.service
diff --git a/lib/allora/lib/assets/instance/storage/copy-data-from-s3.sh b/lib/allora/lib/assets/instance/storage/copy-data-from-s3.sh
new file mode 100644
index 00000000..b3d0aac2
--- /dev/null
+++ b/lib/allora/lib/assets/instance/storage/copy-data-from-s3.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+source /etc/cdk_environment
+TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/instance-id)
+
+echo "Sync started at " $(date)
+SECONDS=0
+
+s5cmd --log error cp --exclude 'lost+found' $SNAPSHOT_S3_PATH/data/* /data && \
+chown -R bcuser:bcuser /data && \
+echo "Sync finished at " $(date) && \
+echo "$(($SECONDS / 60)) minutes and $(($SECONDS % 60)) seconds elapsed." && \
+su bcuser && \
+docker compose -f /home/bcuser/docker-compose.yml up -d && \
+aws autoscaling complete-lifecycle-action --lifecycle-action-result CONTINUE --instance-id $INSTANCE_ID --lifecycle-hook-name "$LIFECYCLE_HOOK_NAME" --auto-scaling-group-name "$AUTOSCALING_GROUP_NAME" --region $REGION || \
+aws autoscaling complete-lifecycle-action --lifecycle-action-result ABANDON --instance-id $INSTANCE_ID --lifecycle-hook-name "$LIFECYCLE_HOOK_NAME" --auto-scaling-group-name "$AUTOSCALING_GROUP_NAME" --region $REGION
diff --git a/lib/allora/lib/assets/instance/storage/copy-data-to-s3.sh b/lib/allora/lib/assets/instance/storage/copy-data-to-s3.sh
new file mode 100644
index 00000000..6ecbf81f
--- /dev/null
+++ b/lib/allora/lib/assets/instance/storage/copy-data-to-s3.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+set +e
+source /etc/cdk_environment
+
+/usr/local/bin/docker-compose -f /home/bcuser/docker-compose.yml down
+echo "Sync started at " $(date)
+s5cmd --log error sync /data $SNAPSHOT_S3_PATH/
+echo "Sync finished at " $(date)
+sudo touch /data/snapshotted
+sudo su bcuser
+docker compose -f /home/bcuser/docker-compose.yml up -d
diff --git a/lib/allora/lib/assets/instance/storage/setup.sh b/lib/allora/lib/assets/instance/storage/setup.sh
new file mode 100755
index 00000000..ca5e6b81
--- /dev/null
+++ b/lib/allora/lib/assets/instance/storage/setup.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+make_fs () {
+ # If file system = to ext4 use mkfs.ext4, if xfs use mkfs.xfs
+ if [ -z "$1" ]; then
+ echo "Error: No file system type provided."
+    echo "Usage: make_fs <file-system-type(ext4|xfs)> <volume-id>"
+ exit 1
+ fi
+
+ if [ -z "$2" ]; then
+ echo "Error: No target volume ID provided."
+    echo "Usage: make_fs <file-system-type(ext4|xfs)> <volume-id>"
+ exit 1
+ fi
+
+ local file_system=$1
+ local volume_id=$2
+ if [ "$file_system" == "ext4" ]; then
+ mkfs -t ext4 "$volume_id"
+ return "$?"
+ else
+ mkfs.xfs -f "$volume_id"
+ return "$?"
+ fi
+}
+
+# We need an nvme disk that is not mounted and not partitioned
+get_all_empty_nvme_disks () {
+ local all_not_mounted_nvme_disks
+ local all_mounted_nvme_partitions
+ local unmounted_nvme_disks=()
+ local sorted_unmounted_nvme_disks
+
+ #The disk will only be mounted when the nvme disk is larger than 100GB to avoid storing blockchain node data directly on the root EBS disk (which is 46GB by default)
+ all_not_mounted_nvme_disks=$(lsblk -lnb | awk '{if ($7 == "" && $4 > 100000000) {print $1}}' | grep nvme)
+ all_mounted_nvme_partitions=$(mount | awk '{print $1}' | grep /dev/nvme)
+ for disk in ${all_not_mounted_nvme_disks[*]}; do
+ if [[ ! "${all_mounted_nvme_partitions[*]}" =~ $disk ]]; then
+ unmounted_nvme_disks+=("$disk")
+ fi
+ done
+ # Sort the array
+ sorted_unmounted_nvme_disks=($(printf '%s\n' "${unmounted_nvme_disks[*]}" | sort))
+ echo "${sorted_unmounted_nvme_disks[*]}"
+}
+
+get_next_empty_nvme_disk () {
+ local sorted_unmounted_nvme_disks
+ sorted_unmounted_nvme_disks=($(get_all_empty_nvme_disks))
+ # Return the first unmounted nvme disk
+ echo "/dev/${sorted_unmounted_nvme_disks[0]}"
+}
+
+# Add input as command line parameters for name of the directory to mount
+if [ -n "$1" ]; then
+ DIR_NAME=$1
+else
+ echo "Error: No data file system mount path is provided."
+  echo "Usage: instance/storage/setup.sh <data-mount-path> [<file-system-type>] [<volume-size-bytes>]"
+ echo "Default file system type is ext4"
+  echo "If you skip <volume-size-bytes>, script will try to use the first unformatted volume ID."
+ echo "Usage example: instance/storage/setup.sh /data ext4 300000000000000"
+ exit 1
+fi
+
+# Case input for $2 between ext4 and xfs, use ext4 as default
+case $2 in
+ ext4)
+ echo "File system set to ext4"
+ FILE_SYSTEM="ext4"
+ FS_CONFIG="defaults"
+ ;;
+ xfs)
+ echo "File system set to xfs"
+ FILE_SYSTEM="xfs"
+ FS_CONFIG="noatime,nodiratime,nodiscard" # See more: https://cdrdv2-public.intel.com/686417/rocksdb-benchmark-tuning-guide-on-xeon.pdf
+ ;;
+ *)
+ echo "File system set to ext4"
+ FILE_SYSTEM="ext4"
+ FS_CONFIG="defaults"
+ ;;
+esac
+
+if [ -n "$3" ]; then
+ VOLUME_SIZE=$3
+else
+ echo "The size of volume for $DIR_NAME is not specified. Will try to guess volume ID."
+fi
+
+ echo "Checking if $DIR_NAME is mounted, and dont do anything if it is"
+ if [ $(df --output=target | grep -c "$DIR_NAME") -lt 1 ]; then
+
+ if [ -n "$VOLUME_SIZE" ]; then
+ VOLUME_ID=/dev/$(lsblk -lnb | awk -v VOLUME_SIZE_BYTES="$VOLUME_SIZE" '{if ($4== VOLUME_SIZE_BYTES) {print $1}}')
+ echo "Data volume size defined, use respective volume id: $VOLUME_ID"
+ else
+ VOLUME_ID=$(get_next_empty_nvme_disk)
+ echo "Data volume size undefined, trying volume id: $VOLUME_ID"
+ fi
+
+ make_fs $FILE_SYSTEM "$VOLUME_ID"
+
+ sleep 10
+ VOLUME_UUID=$(lsblk -fn -o UUID "$VOLUME_ID")
+ VOLUME_FSTAB_CONF="UUID=$VOLUME_UUID $DIR_NAME $FILE_SYSTEM $FS_CONFIG 0 2"
+ echo "VOLUME_ID=$VOLUME_ID"
+ echo "VOLUME_UUID=$VOLUME_UUID"
+ echo "VOLUME_FSTAB_CONF=$VOLUME_FSTAB_CONF"
+
+ # Check if data disc is already in fstab and replace the line if it is with the new disc UUID
+ echo "Checking fstab for volume $DIR_NAME"
+ if [ $(grep -c "$DIR_NAME" /etc/fstab) -gt 0 ]; then
+ SED_REPLACEMENT_STRING="$(grep -n "$DIR_NAME" /etc/fstab | cut -d: -f1)s#.*#$VOLUME_FSTAB_CONF#"
+ # if file exists, delete it
+ if [ -f /etc/fstab.bak ]; then
+ rm /etc/fstab.bak
+ fi
+ cp /etc/fstab /etc/fstab.bak
+ sed -i "$SED_REPLACEMENT_STRING" /etc/fstab
+ else
+ echo "$VOLUME_FSTAB_CONF" | tee -a /etc/fstab
+ fi
+
+ mount -a
+ chown -R bcuser:bcuser "$DIR_NAME"
+ else
+ echo "$DIR_NAME volume is mounted, nothing changed"
+ fi
diff --git a/lib/allora/lib/assets/user-data-alinux.sh b/lib/allora/lib/assets/user-data-alinux.sh
new file mode 100644
index 00000000..9ad44dbb
--- /dev/null
+++ b/lib/allora/lib/assets/user-data-alinux.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+echo "[user-data] STARTING ALLORA USER DATA SCRIPT"
+
+# Collect the CDK-substituted _PLACEHOLDER_ values into a root-only env file.
+# Permissions are tightened to 600 *before* writing because the file will hold
+# wallet secrets (key name and restore mnemonic).
+touch /etc/cdk_environment
+chmod 600 /etc/cdk_environment
+
+{
+  echo "AWS_REGION=${_AWS_REGION_}"
+  echo "ASSETS_S3_PATH=${_ASSETS_S3_PATH_}"
+  echo "RESOURCE_ID=${_NODE_CF_LOGICAL_ID_}"
+  echo "STACK_NAME=${_STACK_NAME_}"
+  echo "STACK_ID=${_STACK_ID_}"
+
+  echo "ALLORA_WORKER_NAME=${_ALLORA_WORKER_NAME_}"
+  echo "ALLORA_ENV=${_ALLORA_ENV_}"
+  echo "MODEL_REPO=${_MODEL_REPO_}"
+  # -e keeps embedded escapes; the value is single-quoted because it may span
+  # several KEY="VALUE" lines (see MODEL_ENV_VARS default in node-config.ts).
+  echo -e "MODEL_ENV_VARS='${_MODEL_ENV_VARS_}'"
+
+  echo "ALLORA_WALLET_ADDRESS_KEY_NAME=${_ALLORA_WALLET_ADDRESS_KEY_NAME_}"
+  echo "ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC=${_ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC_}"
+  echo "ALLORA_WALLET_HOME_DIR=${_ALLORA_WALLET_HOME_DIR_}"
+  echo "ALLORA_WALLET_GAS_ADJUSTMENT=${_ALLORA_WALLET_GAS_ADJUSTMENT_}"
+  echo "ALLORA_WALLET_GAS=${_ALLORA_WALLET_GAS_}"
+
+  # Newer wallet tuning properties
+  echo "ALLORA_WALLET_GAS_PRICES=${_ALLORA_WALLET_GAS_PRICES_}"
+  echo "ALLORA_WALLET_GAS_PRICE_INTERVAL=${_ALLORA_WALLET_GAS_PRICE_INTERVAL_}"
+  echo "ALLORA_WALLET_RETRY_DELAY=${_ALLORA_WALLET_RETRY_DELAY_}"
+  echo "ALLORA_WALLET_BLOCK_DURATION_ESTIMATED=${_ALLORA_WALLET_BLOCK_DURATION_ESTIMATED_}"
+  echo "ALLORA_WALLET_WINDOW_CORRECTION_FACTOR=${_ALLORA_WALLET_WINDOW_CORRECTION_FACTOR_}"
+  echo "ALLORA_WALLET_MAX_FEES=${_ALLORA_WALLET_MAX_FEES_}"
+  echo "ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY=${_ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY_}"
+  # /newer wallet tuning properties
+
+  echo "ALLORA_WALLET_NODE_RPC=${_ALLORA_WALLET_NODE_RPC_}"
+  echo "ALLORA_WALLET_MAX_RETRIES=${_ALLORA_WALLET_MAX_RETRIES_}"
+  echo "ALLORA_WALLET_DELAY=${_ALLORA_WALLET_DELAY_}"
+  echo "ALLORA_WALLET_SUBMIT_TX=${_ALLORA_WALLET_SUBMIT_TX_}"
+
+  echo "ALLORA_WORKER_TOPIC_ID=${_ALLORA_WORKER_TOPIC_ID_}"
+  echo "ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME=${_ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME_}"
+  echo "ALLORA_WORKER_INFERENCE_ENDPOINT=${_ALLORA_WORKER_INFERENCE_ENDPOINT_}"
+  echo "ALLORA_WORKER_LOOP_SECONDS=${_ALLORA_WORKER_LOOP_SECONDS_}"
+  echo "ALLORA_WORKER_TOKEN=${_ALLORA_WORKER_TOKEN_}"
+
+  echo "ALLORA_REPUTER_TOPIC_ID=${_ALLORA_REPUTER_TOPIC_ID_}"
+  echo "ALLORA_REPUTER_ENTRYPOINT_NAME=${_ALLORA_REPUTER_ENTRYPOINT_NAME_}"
+  echo "ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT=${_ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT_}"
+
+  # Newer reputer properties
+  echo "ALLORA_REPUTER_LOSS_FUNCTION_SERVICE=${_ALLORA_REPUTER_LOSS_FUNCTION_SERVICE_}"
+  echo "ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD=${_ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD_}"
+  # /newer reputer properties
+  echo "ALLORA_REPUTER_LOOP_SECONDS=${_ALLORA_REPUTER_LOOP_SECONDS_}"
+  echo "ALLORA_REPUTER_TOKEN=${_ALLORA_REPUTER_TOKEN_}"
+  echo "ALLORA_REPUTER_MIN_STAKE=${_ALLORA_REPUTER_MIN_STAKE_}"
+} >> /etc/cdk_environment
+
+source /etc/cdk_environment
+# Export environment variables so calls to `envsubst` inherit the environment variables.
+# NOTE(review): a multi-line MODEL_ENV_VARS value makes `export "$line"` fail on the
+# continuation lines (they are not KEY=VALUE pairs) — confirm intended.
+while read -r line; do export "$line"; done < /etc/cdk_environment
+
+# Pick the SSM Agent build matching the instance CPU architecture.
+arch=$(uname -m)
+
+echo "Architecture detected: $arch"
+
+if [ "$arch" == "x86_64" ]; then
+  SSM_AGENT_BINARY_URI=https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+else
+  SSM_AGENT_BINARY_URI=https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_arm64/amazon-ssm-agent.rpm
+fi
+
+echo "Updating and installing required system packages"
+dnf update -y
+# NOTE(review): amazon-linux-extras is an Amazon Linux 2 tool; the stack boots
+# AMAZON_LINUX_2023 (see single-node-stack.ts), where this command does not exist —
+# confirm whether this line (and the amzn2-pinned gcc10 below) should be removed.
+amazon-linux-extras install epel -y
+dnf groupinstall "Development Tools" -y
+dnf -y install python3-pip amazon-cloudwatch-agent collectd jq gcc10-10.5.0-1.amzn2.0.2 ncurses-devel telnet aws-cfn-bootstrap cronie
+
+cd /opt
+
+# Fetch the CDK asset bundle (scripts/configs) staged by the stack.
+echo "Downloading assets zip file"
+aws s3 cp $ASSETS_S3_PATH ./assets.zip
+unzip -q assets.zip
+
+echo 'Upgrading SSM Agent'
+yum install -y $SSM_AGENT_BINARY_URI
+
+# Install Git
+dnf install git -y
+
+# Install Docker CE from the upstream repo (the distro packages are removed first).
+echo "Installing Docker"
+dnf remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine
+dnf -y install dnf-plugins-core
+dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+# Pin $releasever to 9 because the CentOS repo has no AL-specific release tree.
+sed -i 's/$releasever/9/g' /etc/yum.repos.d/docker-ce.repo
+dnf -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+systemctl enable --now docker
+
+docker compose version
+
+# Fixed uid/gid so file ownership matches across volumes and containers.
+echo "Creating run user and making sure it has all necessary permissions"
+groupadd -g 1002 bcuser
+useradd -u 1002 -g 1002 -m -s /bin/bash bcuser
+usermod -a -G docker bcuser
+usermod -a -G docker ec2-user
+
+# NOTE(review): docker was already enabled+started by `systemctl enable --now docker`
+# above; this start/enable pair is redundant.
+echo "Starting docker"
+service docker start
+systemctl enable docker
+
+# NOTE(review): `-e $?` reports only the exit code of the previous command
+# (`systemctl enable docker`), not whether the whole setup succeeded — confirm.
+cfn-signal -e $? --stack $STACK_NAME --resource $RESOURCE_ID --region $AWS_REGION
+
+echo "Preparing data volume"
+
+mkdir -p /data
+
+# NOTE(review): DATA_VOLUME_TYPE is never written to /etc/cdk_environment above, so
+# this comparison sees an empty string and the EBS branch always runs — confirm intended.
+if [[ "$DATA_VOLUME_TYPE" == "instance-store" ]]; then
+  echo "Data volume type is instance store"
+
+  # Instance-store volumes are wiped on stop; re-run the setup script on every boot.
+  (crontab -l; echo "@reboot /opt/instance/storage/setup.sh /data ext4 > /tmp/setup-store-volume-data.log 2>&1") | crontab -
+  crontab -l
+
+  /opt/instance/storage/setup.sh /data ext4
+else
+  echo "Data volume type is EBS"
+  echo "Waiting for EBS volume to become available"
+  sleep 60
+  /opt/instance/storage/setup.sh /data ext4
+fi
+
+lsblk -d
+
+# clone node repo
+cd /home/bcuser
+git clone https://github.com/allora-network/allora-offchain-node.git node-repo
+cd node-repo
+git checkout $ALLORA_ENV
+
+cp config.cdk.json.template config.json
+
+#wallet config str replace
+sed -i "s/_ALLORA_WALLET_ADDRESS_KEY_NAME_/$ALLORA_WALLET_ADDRESS_KEY_NAME/" config.json
+sed -i "s/_ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC_/$ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC/" config.json
+sed -i "s/_ALLORA_WALLET_HOME_DIR_/$ALLORA_WALLET_HOME_DIR/" config.json
+sed -i "s/_ALLORA_WALLET_GAS_ADJUSTMENT_/$ALLORA_WALLET_GAS_ADJUSTMENT/" config.json #must go before
+
+#new props
+sed -i "s/_ALLORA_WALLET_GAS_PRICE_INTERVAL_/$ALLORA_WALLET_GAS_PRICE_INTERVAL/" config.json #must go first
+sed -i "s/_ALLORA_WALLET_GAS_PRICES_/$ALLORA_WALLET_GAS_PRICES/" config.json
+sed -i "s/_ALLORA_WALLET_GAS_/$ALLORA_WALLET_GAS/" config.json #has to go last of the gas
+sed -i "s/_ALLORA_WALLET_MAX_FEES_/$ALLORA_WALLET_MAX_FEES/" config.json
+sed -i "s/_ALLORA_WALLET_RETRY_DELAY_/$ALLORA_WALLET_RETRY_DELAY/" config.json
+sed -i "s/_ALLORA_WALLET_BLOCK_DURATION_ESTIMATED_/$ALLORA_WALLET_BLOCK_DURATION_ESTIMATED/" config.json
+sed -i "s/_ALLORA_WALLET_WINDOW_CORRECTION_FACTOR_/$ALLORA_WALLET_WINDOW_CORRECTION_FACTOR/" config.json
+sed -i "s/_ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY_/$ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY/" config.json
+
+#/new props
+
+sed -i "s#_ALLORA_WALLET_NODE_RPC_#$ALLORA_WALLET_NODE_RPC#" config.json
+sed -i "s/_ALLORA_WALLET_MAX_RETRIES_/$ALLORA_WALLET_MAX_RETRIES/" config.json
+sed -i "s/_ALLORA_WALLET_DELAY_/$ALLORA_WALLET_DELAY/" config.json #@deprecated
+sed -i "s/_ALLORA_WALLET_SUBMIT_TX_/$ALLORA_WALLET_SUBMIT_TX/" config.json #@deprecated
+
+#worker config str replace
+sed -i "s/_ALLORA_WORKER_TOPIC_ID_/$ALLORA_WORKER_TOPIC_ID/" config.json
+sed -i "s/_ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME_/$ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME/" config.json
+sed -i "s#_ALLORA_WORKER_INFERENCE_ENDPOINT_#$ALLORA_WORKER_INFERENCE_ENDPOINT#" config.json
+sed -i "s/_ALLORA_WORKER_LOOP_SECONDS_/$ALLORA_WORKER_LOOP_SECONDS/" config.json #@deprecated
+sed -i "s/_ALLORA_WORKER_TOKEN_/$ALLORA_WORKER_TOKEN/" config.json
+
+#reputer config str replace
+sed -i "s/_ALLORA_REPUTER_TOPIC_ID_/$ALLORA_REPUTER_TOPIC_ID/" config.json
+sed -i "s/_ALLORA_REPUTER_ENTRYPOINT_NAME_/$ALLORA_REPUTER_ENTRYPOINT_NAME/" config.json
+sed -i "s#_ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT_#$ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT#" config.json
+
+#new props
+sed -i "s#_ALLORA_REPUTER_LOSS_FUNCTION_SERVICE_#$ALLORA_REPUTER_LOSS_FUNCTION_SERVICE#" config.json
+sed -i "s/_ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD_/$ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD/" config.json
+#/new props
+
+sed -i "s/_ALLORA_REPUTER_LOOP_SECONDS_/$ALLORA_REPUTER_LOOP_SECONDS/" config.json #@deprecated
+sed -i "s/_ALLORA_REPUTER_TOKEN_/$ALLORA_REPUTER_TOKEN/" config.json
+sed -i "s/_ALLORA_REPUTER_MIN_STAKE_/$ALLORA_REPUTER_MIN_STAKE/" config.json
+
+#pull in model repo
+echo 'Pulling in the model repo '
+echo $MODEL_REPO
+cd /home/bcuser/node-repo/adapter/api
+rm -rf source
+git clone $MODEL_REPO source
+
+#build node
+echo 'Building inner node'
+cd source
+
+cp /home/bcuser/node-repo/config.json config.json
+
+echo -e "$MODEL_ENV_VARS" >> .env
+
+#build basic worker
+echo 'building basic worker'
+chmod +x init.config
+./init.config
+mkdir /home/bcuser/data
+ln -s /home/bcuser/data /data
+chown -R bcuser:bcuser /data
+chown -R bcuser:bcuser /home/bcuser
+su bcuser
+
+echo "Install allorad"
+curl -sSL https://raw.githubusercontent.com/allora-network/allora-chain/main/install.sh | bash -s -- v0.8.0
+# docker compose up --build
+
+echo "[user-data] Allora user-data script successful"
+
+#ping the server for an inference response to $ALLORA_WORKER_INFERENCE_ENDPOINT/inference/$ALLORA_WORKER_TOKEN
+curl "$ALLORA_WORKER_INFERENCE_ENDPOINT/inference/$ALLORA_WORKER_TOKEN"
diff --git a/lib/allora/lib/common-stack.ts b/lib/allora/lib/common-stack.ts
new file mode 100644
index 00000000..80601ac7
--- /dev/null
+++ b/lib/allora/lib/common-stack.ts
@@ -0,0 +1,53 @@
+import * as cdk from "aws-cdk-lib";
+import * as cdkConstructs from "constructs";
+import * as iam from "aws-cdk-lib/aws-iam";
+import * as nag from "cdk-nag";
+
+/**
+ * Props for {@link AlloraCommonStack}. Currently adds nothing beyond the
+ * standard cdk.StackProps; kept as an extension point for future options.
+ */
+export interface AlloraCommonStackProps extends cdk.StackProps {
+
+}
+
+/**
+ * Common stack shared by Allora node stacks: creates the EC2 instance IAM role
+ * (SSM + CloudWatch agent access, plus cloudformation:SignalResource) and
+ * exports its ARN as "EdgeNodeInstanceRoleArn" for node stacks to import.
+ */
+export class AlloraCommonStack extends cdk.Stack {
+  // NOTE(review): these two fields appear unused within this class — confirm
+  // they are read elsewhere or remove them.
+  AWS_STACK_NAME = cdk.Stack.of(this).stackName;
+  AWS_ACCOUNT_ID = cdk.Stack.of(this).account;
+
+  constructor(scope: cdkConstructs.Construct, id: string, props: AlloraCommonStackProps) {
+    super(scope, id, props);
+
+    // Instance role: SSM Session Manager access and CloudWatch agent publishing.
+    const instanceRole = new iam.Role(this, "node-role", {
+      assumedBy: new iam.ServicePrincipal("ec2.amazonaws.com"),
+      managedPolicies: [
+        iam.ManagedPolicy.fromAwsManagedPolicyName("AmazonSSMManagedInstanceCore"),
+        iam.ManagedPolicy.fromAwsManagedPolicyName("CloudWatchAgentServerPolicy")
+
+      ]
+    });
+
+    // Allows user-data to call cfn-signal; "*" is required because the node
+    // stack is not known here (see the IAM5 suppression below).
+    instanceRole.addToPolicy(new iam.PolicyStatement({
+      resources: ["*"],
+      actions: ["cloudformation:SignalResource"]
+    }));
+
+
+    // Cross-stack export consumed by single-node-stack.ts via Fn.importValue.
+    new cdk.CfnOutput(this, "Instance Role ARN", {
+      value: instanceRole.roleArn,
+      exportName: "EdgeNodeInstanceRoleArn"
+    });
+
+    // cdk-nag suppressions
+    nag.NagSuppressions.addResourceSuppressions(
+      this,
+      [
+        {
+          id: "AwsSolutions-IAM4",
+          reason: "AmazonSSMManagedInstanceCore and CloudWatchAgentServerPolicy are restrictive enough"
+        },
+        {
+          id: "AwsSolutions-IAM5",
+          reason: "Can't target specific stack: https://github.com/aws/aws-cdk/issues/22657"
+        }
+      ],
+      true
+    );
+  }
+}
diff --git a/lib/allora/lib/config/node-config.interface.ts b/lib/allora/lib/config/node-config.interface.ts
new file mode 100644
index 00000000..4220f92d
--- /dev/null
+++ b/lib/allora/lib/config/node-config.interface.ts
@@ -0,0 +1,55 @@
+import * as configTypes from "../../../constructs/config.interface";
+
+// Allora-specific data-volume config; identical to the shared DataVolumeConfig
+// for now, aliased so Allora code can extend it later without touching callers.
+export interface AlloraDataVolumeConfig extends configTypes.DataVolumeConfig {
+}
+
+// Base account/region config; identical to the shared BaseConfig for now.
+export interface AlloraBaseConfig extends configTypes.BaseConfig {
+
+}
+
+/**
+ * Full configuration for a single Allora worker node. All values are plain
+ * strings because they are substituted verbatim into the instance user-data
+ * script (see user-data-alinux.sh) via cdk.Fn.sub.
+ */
+export interface AlloraSingleNodeConfig extends configTypes.SingleNodeConfig {
+  resourceNamePrefix: string,
+  alloraWorkerName: string,
+  alloraEnv: string,          // branch of allora-offchain-node to check out
+  modelRepo: string,          // git URL of the model repo cloned on the instance
+  modelEnvVars: string,       // multi-line KEY="VALUE" text appended to the model's .env
+
+  //Wallet config
+  alloraWalletAddressKeyName: string,
+  alloraWalletAddressRestoreMnemonic: string,   // secret: wallet restore mnemonic
+  alloraWalletHomeDir: string,
+  alloraWalletGas: string,
+  alloraWalletGasAdjustment: string,
+
+  alloraWalletGasPrices: string,
+  alloraWalletGasPriceInterval: string,
+  alloraWalletRetryDelay: string,
+  alloraWalletBlockDurationEstimated: string,
+  alloraWalletWindowCorrectionFactor: string,
+  alloraWalletAccountSequenceRetryDelay: string,
+
+  alloraWalletNodeRpc: string,
+  alloraWalletMaxRetries: string,
+  alloraWalletDelay: string,        // @deprecated (see user-data substitutions)
+  alloraWalletSubmitTx: string,     // @deprecated (see user-data substitutions)
+  alloraWalletMaxFees: string,
+
+  //Worker Properties
+  alloraWorkerTopicId: string,
+  alloraWorkerInferenceEntrypointName: string,
+  alloraWorkerInferenceEndpoint: string,
+  alloraWorkerLoopSeconds: string,  // @deprecated (see user-data substitutions)
+  alloraWorkerToken: string,
+
+  //Reputer Properties
+  alloraReputerTopicId: string,
+  alloraReputerEntrypointName: string,
+  alloraReputerSourceOfTruthEndpoint: string,
+
+  alloraReputerLossFunctionService: string,
+  alloraReputerLossMethodOptionsLossMethod: string,
+
+  alloraReputerLoopSeconds: string, // @deprecated (see user-data substitutions)
+  alloraReputerToken: string,
+  alloraReputerMinStake: string,
+}
diff --git a/lib/allora/lib/config/node-config.ts b/lib/allora/lib/config/node-config.ts
new file mode 100644
index 00000000..e4f6dbc7
--- /dev/null
+++ b/lib/allora/lib/config/node-config.ts
@@ -0,0 +1,89 @@
+import * as ec2 from "aws-cdk-lib/aws-ec2";
+import * as configTypes from "./node-config.interface";
+import * as constants from "../../../constructs/constants";
+
+
+// Resolve a lowercase volume-type string from the environment to the value the
+// CDK constructs expect. "instance-store" maps to the project-wide marker
+// constant; anything unrecognized (including "gp3" itself) resolves to GP3.
+const parseDataVolumeType = (dataVolumeType: string) => {
+    if (dataVolumeType === "instance-store") {
+        return constants.InstanceStoreageDeviceVolumeType;
+    }
+    if (dataVolumeType === "io2") {
+        return ec2.EbsDeviceVolumeType.IO2;
+    }
+    if (dataVolumeType === "io1") {
+        return ec2.EbsDeviceVolumeType.IO1;
+    }
+    // "gp3" and any unknown value fall back to GP3.
+    return ec2.EbsDeviceVolumeType.GP3;
+}
+
+// Target AWS account/region, read from the environment with placeholder defaults.
+export const baseConfig: configTypes.AlloraBaseConfig = {
+    accountId: process.env.AWS_ACCOUNT_ID || "xxxxxxxxxxx", // Set your target AWS Account ID
+    region: process.env.AWS_REGION || "us-east-1", // Set your target AWS Region
+};
+
+// Single-node configuration assembled from environment variables, with working
+// defaults for every field. NOTE(review): secret fields default to the literal
+// 'secret' — deployments must supply real values via the environment.
+export const singleNodeConfig: configTypes.AlloraSingleNodeConfig = {
+    instanceType: new ec2.InstanceType(process.env.AWS_INSTANCE_TYPE || 't3.medium'),
+    instanceCpuType: process.env.AWS_INSTANCE_CPU_TYPE?.toLowerCase() == "x86_64" ? ec2.AmazonLinuxCpuType.X86_64 : ec2.AmazonLinuxCpuType.ARM_64,
+    resourceNamePrefix: process.env.AWS_RESOURCE_NAME_PREFIX || 'AlloraWorkerx',
+    dataVolumes: [{
+        sizeGiB: process.env.EDGE_DATA_VOL_SIZE ? parseInt(process.env.EDGE_DATA_VOL_SIZE) : 256,
+        // NOTE(review): the ternary below is redundant — `?.toLowerCase() ?? "gp3"`
+        // gives the same result, since parseDataVolumeType already defaults to GP3.
+        type: parseDataVolumeType(process.env.EDGE_DATA_VOL_TYPE?.toLowerCase() ? process.env.EDGE_DATA_VOL_TYPE?.toLowerCase() : "gp3"),
+        // NOTE(review): defaults (10000 IOPS / 700 MiB/s) differ from the values in
+        // sample-configs/.env-sample-full (3000 / 125) — confirm which is intended.
+        iops: process.env.EDGE_DATA_VOL_IOPS ? parseInt(process.env.EDGE_DATA_VOL_IOPS) : 10000,
+        throughput: process.env.EDGE_DATA_VOL_THROUGHPUT ? parseInt(process.env.EDGE_DATA_VOL_THROUGHPUT) : 700
+    }],
+    alloraWorkerName: process.env.ALLORA_WORKER_NAME || 'aws',
+    alloraEnv: process.env.ALLORA_ENV || 'dev',
+    modelRepo: process.env.MODEL_REPO || 'https://github.com/allora-network/basic-coin-prediction-node',
+    // Multi-line KEY="VALUE" block appended verbatim to the model repo's .env file.
+    modelEnvVars: process.env.MODEL_ENV_VARS || `
+TOKEN="ETH"
+TRAINING_DAYS="1"
+TIMEFRAME="4h"
+MODEL="LinearRegression"
+REGION="US"
+DATA_PROVIDER="coingecko"
+CG_API_KEY="secret"
+`,
+
+    //Wallet config
+    alloraWalletAddressKeyName: process.env.ALLORA_WALLET_ADDRESS_KEY_NAME || 'secret',
+    alloraWalletAddressRestoreMnemonic: process.env.ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC || 'secret',
+    alloraWalletHomeDir: process.env.ALLORA_WALLET_HOME_DIR || '',
+    alloraWalletGas: process.env.ALLORA_WALLET_GAS || '1000000',
+    alloraWalletGasAdjustment: process.env.ALLORA_WALLET_GAS_ADJUSTMENT || '1.0',
+
+    alloraWalletGasPrices: process.env.ALLORA_WALLET_GAS_PRICES || 'auto',
+    alloraWalletGasPriceInterval: process.env.ALLORA_WALLET_GAS_PRICE_INTERVAL || '60',
+    alloraWalletRetryDelay: process.env.ALLORA_WALLET_RETRY_DELAY || '3',
+    alloraWalletBlockDurationEstimated: process.env.ALLORA_WALLET_BLOCK_DURATION_ESTIMATED || '10',
+    alloraWalletWindowCorrectionFactor: process.env.ALLORA_WALLET_WINDOW_CORRECTION_FACTOR || '0.8',
+    alloraWalletAccountSequenceRetryDelay: process.env.ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY || '5',
+
+    alloraWalletNodeRpc: process.env.ALLORA_WALLET_NODE_RPC || 'https://localhost:26657',
+    alloraWalletMaxRetries: process.env.ALLORA_WALLET_MAX_RETRIES || '1',
+    alloraWalletDelay: process.env.ALLORA_WALLET_DELAY || '1',
+    alloraWalletSubmitTx: process.env.ALLORA_WALLET_SUBMIT_TX || 'false',
+    alloraWalletMaxFees: process.env.ALLORA_WALLET_MAX_FEES || '500000',
+
+    //Worker Properties
+    alloraWorkerTopicId: process.env.ALLORA_WORKER_TOPIC_ID || '1',
+    alloraWorkerInferenceEntrypointName: process.env.ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME || 'api-worker-reputer',
+    alloraWorkerInferenceEndpoint: process.env.ALLORA_WORKER_INFERENCE_ENDPOINT || 'http://source:8000/inference/{Token}',
+    alloraWorkerLoopSeconds: process.env.ALLORA_WORKER_LOOP_SECONDS || '30',
+    alloraWorkerToken: process.env.ALLORA_WORKER_TOKEN || 'ethereum',
+
+    //Reputer Properties
+    alloraReputerTopicId: process.env.ALLORA_REPUTER_TOPIC_ID || '1',
+    alloraReputerEntrypointName: process.env.ALLORA_REPUTER_ENTRYPOINT_NAME || 'api-worker-reputer',
+    alloraReputerSourceOfTruthEndpoint: process.env.ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT || 'http://source:8888/truth/{Token}/{BlockHeight}',
+
+    alloraReputerLossFunctionService: process.env.ALLORA_REPUTER_LOSS_FUNCTION_SERVICE || 'http://localhost:5000',
+    alloraReputerLossMethodOptionsLossMethod: process.env.ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD || 'sqe',
+
+    alloraReputerLoopSeconds: process.env.ALLORA_REPUTER_LOOP_SECONDS || '30',
+    alloraReputerToken: process.env.ALLORA_REPUTER_TOKEN || 'ethereum',
+    alloraReputerMinStake: process.env.ALLORA_REPUTER_MIN_STAKE || '100000',
+};
+
+
diff --git a/lib/allora/lib/constructs/node-cw-dashboard.ts b/lib/allora/lib/constructs/node-cw-dashboard.ts
new file mode 100644
index 00000000..f993b38e
--- /dev/null
+++ b/lib/allora/lib/constructs/node-cw-dashboard.ts
@@ -0,0 +1,235 @@
+// CloudWatch dashboard body for a single node. ${REGION}, ${INSTANCE_ID} and
+// ${INSTANCE_NAME} are NOT TypeScript interpolations — they are left literal
+// here and substituted by cdk.Fn.sub after JSON.stringify (see single-node-stack.ts).
+// NOTE(review): some "title" values use template literals and others plain
+// strings; both are equivalent here (no interpolation) — consider unifying.
+export const SingleNodeCWDashboardJSON = {
+    "widgets": [
+        {
+            "height": 5,
+            "width": 6,
+            "y": 0,
+            "x": 0,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stat": "Average",
+                "period": 300,
+                "stacked": false,
+                "yAxis": {
+                    "left": {
+                        "min": 0
+                    }
+                },
+                "region": "${REGION}",
+                "metrics": [
+                    [ "AWS/EC2", "CPUUtilization", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "CPU utilization (%)"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 5,
+            "x": 18,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stat": "Average",
+                "period": 300,
+                "stacked": false,
+                "yAxis": {
+                    "left": {
+                        "min": 0
+                    }
+                },
+                "region": "${REGION}",
+                "metrics": [
+                    [ "AWS/EC2", "NetworkIn", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "Network in (bytes)"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 0,
+            "x": 18,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stat": "Average",
+                "period": 300,
+                "stacked": false,
+                "yAxis": {
+                    "left": {
+                        "min": 0
+                    }
+                },
+                "region": "${REGION}",
+                "metrics": [
+                    [ "AWS/EC2", "NetworkOut", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "Network out (bytes)"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 10,
+            "x": 0,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Average",
+                "period": 300,
+                "metrics": [
+                    [ "CWAgent", "mem_used_percent", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "Mem Used (%)"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 5,
+            "x": 0,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Average",
+                "period": 300,
+                "metrics": [
+                    [ "CWAgent", "cpu_usage_iowait", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "CPU Usage IO wait (%)"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 0,
+            "x": 6,
+            "type": "metric",
+            "properties": {
+                "metrics": [
+                    [ { "expression": "m7/PERIOD(m7)", "label": "Read", "id": "e7" } ],
+                    [ "CWAgent", "diskio_reads", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m7", "visible": false, "stat": "Sum", "period": 60 } ],
+                    [ { "expression": "m8/PERIOD(m8)", "label": "Write", "id": "e8" } ],
+                    [ "CWAgent", "diskio_writes", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m8", "visible": false, "stat": "Sum", "period": 60 } ]
+                ],
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Sum",
+                "period": 60,
+                "title": `sda1 Volume Read/Write (IO/sec)`
+            }
+        },
+        {
+            "height": 4,
+            "width": 6,
+            "y": 0,
+            "x": 12,
+            "type": "metric",
+            "properties": {
+                "metrics": [
+                    [ "CWAgent", "allora_current_block_height", "InstanceId", "${INSTANCE_ID}", { "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "sparkline": true,
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Maximum",
+                "period": 60,
+                "title": "Allora Client Block Height"
+            }
+        },
+        {
+            "height": 4,
+            "width": 6,
+            "y": 4,
+            "x": 12,
+            "type": "metric",
+            "properties": {
+                "sparkline": true,
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Maximum",
+                "period": 60,
+                "metrics": [
+                    [ { "expression": "SELECT COUNT(edge_peer) FROM CWAgent GROUP BY InstanceId", "label": "${INSTANCE_ID}-${INSTANCE_NAME}" } ]
+                ],
+                "title": "Allora Client Peer Count"
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 5,
+            "x": 6,
+            "type": "metric",
+            "properties": {
+                "view": "timeSeries",
+                "stat": "Sum",
+                "period": 60,
+                "stacked": false,
+                "yAxis": {
+                    "left": {
+                        "min": 0
+                    }
+                },
+                "region": "${REGION}",
+                "metrics": [
+                    [ { "expression": "IF(m7_2 !=0, (m7_1 / m7_2), 0)", "label": "Read", "id": "e7" } ],
+                    [ "CWAgent", "diskio_read_time", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m7_1", "visible": false, "stat": "Sum", "period": 60 } ],
+                    [ "CWAgent", "diskio_reads", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m7_2", "visible": false, "stat": "Sum", "period": 60 } ],
+                    [ { "expression": "IF(m7_4 !=0, (m7_3 / m7_4), 0)", "label": "Write", "id": "e8" } ],
+                    [ "CWAgent", "diskio_write_time", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m7_3", "visible": false, "stat": "Sum", "period": 60 } ],
+                    [ "CWAgent", "diskio_writes", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m7_4", "visible": false, "stat": "Sum", "period": 60 } ]
+                ],
+                "title": `sda1 Volume Read/Write latency (ms/op)`
+            }
+        },
+        {
+            "height": 5,
+            "width": 6,
+            "y": 10,
+            "x": 6,
+            "type": "metric",
+            "properties": {
+                "metrics": [
+                    [ { "expression": "(m2/1048576)/PERIOD(m2)", "label": "Read", "id": "e2", "period": 60, "region": "${REGION}" } ],
+                    [ "CWAgent", "diskio_read_bytes", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m2", "stat": "Sum", "visible": false, "period": 60 } ],
+                    [ { "expression": "(m3/1048576)/PERIOD(m3)", "label": "Write", "id": "e3", "period": 60, "region": "${REGION}" } ],
+                    [ "CWAgent", "diskio_write_bytes", "InstanceId", "${INSTANCE_ID}", "name", "sda1", { "id": "m3", "stat": "Sum", "visible": false, "period": 60 } ]
+                ],
+                "view": "timeSeries",
+                "stacked": false,
+                "region": "${REGION}",
+                "stat": "Average",
+                "period": 60,
+                "title": `sda1 Volume Read/Write throughput (MiB/sec)`
+            }
+        },
+        {
+            "height": 3,
+            "width": 6,
+            "y": 15,
+            "x": 6,
+            "type": "metric",
+            "properties": {
+                "metrics": [
+                    [ "CWAgent", "disk_used_percent", "path", "/data", "InstanceId", "${INSTANCE_ID}", "device", "sda1", "fstype", "ext4", { "region": "${REGION}", "label": "/data" } ]
+                ],
+                "sparkline": true,
+                "view": "singleValue",
+                "region": "${REGION}",
+                "title": `sda1 Disk Used (%)`,
+                "period": 60,
+                "stat": "Average"
+            }
+        }
+    ]
+}
\ No newline at end of file
diff --git a/lib/allora/lib/constructs/node-security-group.ts b/lib/allora/lib/constructs/node-security-group.ts
new file mode 100644
index 00000000..0085f62e
--- /dev/null
+++ b/lib/allora/lib/constructs/node-security-group.ts
@@ -0,0 +1,49 @@
+import * as cdk from "aws-cdk-lib";
+import * as cdkContructs from 'constructs';
+import * as ec2 from "aws-cdk-lib/aws-ec2";
+import * as nag from "cdk-nag";
+
+// Props for NodeSecurityGroupConstruct: the VPC the security group belongs to.
+export interface NodeSecurityGroupConstructProps {
+    vpc: cdk.aws_ec2.IVpc;
+  }
+
+  /**
+   * Security group for Allora nodes: all outbound allowed; inbound TCP 8000
+   * (the offchain "source" inference service) restricted to the VPC CIDR.
+   */
+  export class NodeSecurityGroupConstruct extends cdkContructs.Construct {
+    public securityGroup: cdk.aws_ec2.ISecurityGroup;
+
+    constructor(scope: cdkContructs.Construct, id: string, props: NodeSecurityGroupConstructProps) {
+      super(scope, id);
+
+      const {
+        vpc,
+      } = props;
+
+      const sg = new ec2.SecurityGroup(this, `node-security-group`, {
+        vpc,
+        description: "Security Group for Allora Blockchain nodes",
+        allowAllOutbound: true,
+      });
+
+      // Public ports
+      // sg.addIngressRule(ec2.Peer.anyIpv4(), ec2.Port.tcp(9010), "Allow inbound TCP 9010");
+
+      // Private ports restricted only to the VPC IP range
+      sg.addIngressRule(ec2.Peer.ipv4(vpc.vpcCidrBlock), ec2.Port.tcp(8000), "ALLORA Offchain Source");
+
+      this.securityGroup = sg
+
+      /**
+       * cdk-nag suppressions
+       * NOTE(review): the EC23 reason mentions wildcard inbound, but the only
+       * active ingress rule above is VPC-scoped — suppression may be unnecessary
+       * unless the commented-out public port is enabled.
+       */
+
+      nag.NagSuppressions.addResourceSuppressions(
+        this,
+        [
+          {
+            id: "AwsSolutions-EC23",
+            reason: "Allora requires wildcard inbound for specific ports",
+          },
+        ],
+        true
+      );
+    }
+  }
diff --git a/lib/allora/lib/single-node-stack.ts b/lib/allora/lib/single-node-stack.ts
new file mode 100644
index 00000000..7896f34e
--- /dev/null
+++ b/lib/allora/lib/single-node-stack.ts
@@ -0,0 +1,266 @@
+import * as cdk from 'aws-cdk-lib';
+import { Construct } from 'constructs';
+import * as ec2 from 'aws-cdk-lib/aws-ec2';
+import * as s3Assets from "aws-cdk-lib/aws-s3-assets";
+import { SingleNodeConstruct } from "../../constructs/single-node"
+import * as fs from 'fs';
+import * as path from 'path';
+import * as nag from "cdk-nag";
+import * as iam from "aws-cdk-lib/aws-iam";
+import * as configTypes from "../../constructs/config.interface";
+import { NodeSecurityGroupConstruct } from "./constructs/node-security-group";
+import * as nodeCwDashboard from "./constructs/node-cw-dashboard"
+import * as cw from 'aws-cdk-lib/aws-cloudwatch';
+
+// Like cdk.Environment but with account/region required (Vpc.fromLookup in the
+// stack below needs a concrete environment).
+interface AlloraStackEnvironment extends cdk.Environment {
+  account: string;
+  region: string;
+}
+
+/**
+ * Props for {@link AlloraStack}. The allora* string fields are substituted
+ * verbatim into the instance user-data script (user-data-alinux.sh) via
+ * cdk.Fn.sub; they mirror AlloraSingleNodeConfig in config/node-config.interface.ts.
+ */
+export interface AlloraStackProps extends cdk.StackProps {
+  instanceType: ec2.InstanceType;
+  instanceCpuType: ec2.AmazonLinuxCpuType;
+  resourceNamePrefix: string;
+  dataVolumes: configTypes.DataVolumeConfig[];
+  env: AlloraStackEnvironment
+  alloraWorkerName: string;
+  alloraEnv: string;
+  modelRepo: string;
+  modelEnvVars: string;
+
+  alloraWalletAddressKeyName: string;
+  alloraWalletAddressRestoreMnemonic: string;
+  alloraWalletHomeDir: string;
+  alloraWalletGas: string,
+  alloraWalletGasAdjustment: string;
+
+  alloraWalletGasPrices: string;
+  alloraWalletGasPriceInterval: string;
+  alloraWalletRetryDelay: string;
+  alloraWalletBlockDurationEstimated: string;
+  alloraWalletWindowCorrectionFactor: string;
+  alloraWalletMaxFees: string;
+  alloraWalletAccountSequenceRetryDelay: string;
+
+  alloraWalletNodeRpc: string;
+  alloraWalletMaxRetries: string;
+  alloraWalletDelay: string;
+  alloraWalletSubmitTx: string;
+
+  alloraWorkerTopicId: string;
+  alloraWorkerInferenceEntrypointName: string;
+  alloraWorkerInferenceEndpoint: string;
+  alloraWorkerLoopSeconds: string;
+  alloraWorkerToken: string;
+
+  alloraReputerTopicId: string;
+  alloraReputerEntrypointName: string;
+  alloraReputerSourceOfTruthEndpoint: string;
+  alloraReputerLoopSeconds: string;
+  alloraReputerToken: string;
+  alloraReputerMinStake: string;
+
+  alloraReputerLossFunctionService: string;
+  alloraReputerLossMethodOptionsLossMethod: string;
+}
+
+
+/**
+ * Single Allora worker node stack: provisions one EC2 instance in the default
+ * VPC, injects all Allora configuration into its user-data script, and creates
+ * a CloudWatch dashboard for it. Depends on AlloraCommonStack for the instance
+ * role (imported via the "EdgeNodeInstanceRoleArn" export).
+ */
+export class AlloraStack extends cdk.Stack {
+  constructor(scope: Construct, id: string, props: AlloraStackProps) {
+    super(scope, id, props);
+
+    const {
+      env,
+      instanceType,
+      instanceCpuType,
+      resourceNamePrefix,
+      dataVolumes,
+      alloraWorkerName,
+      alloraEnv,
+      modelRepo,
+      modelEnvVars,
+
+      //wallet props
+      alloraWalletAddressKeyName,
+      alloraWalletAddressRestoreMnemonic,
+      alloraWalletHomeDir,
+      alloraWalletGas,
+      alloraWalletGasAdjustment,
+
+      alloraWalletGasPrices,
+      alloraWalletGasPriceInterval,
+      alloraWalletRetryDelay,
+      alloraWalletBlockDurationEstimated,
+      alloraWalletWindowCorrectionFactor,
+      alloraWalletMaxFees,
+      alloraWalletAccountSequenceRetryDelay,
+
+      alloraWalletNodeRpc,
+      alloraWalletMaxRetries,
+      alloraWalletDelay,
+      alloraWalletSubmitTx,
+
+      //worker props
+      alloraWorkerTopicId,
+      alloraWorkerInferenceEntrypointName,
+      alloraWorkerInferenceEndpoint,
+      alloraWorkerLoopSeconds,
+      alloraWorkerToken,
+
+      //reputer props
+      alloraReputerTopicId,
+      alloraReputerEntrypointName,
+      alloraReputerSourceOfTruthEndpoint,
+
+      alloraReputerLossFunctionService,
+      alloraReputerLossMethodOptionsLossMethod,
+
+      alloraReputerLoopSeconds,
+      alloraReputerToken,
+      alloraReputerMinStake,
+    } = props;
+    const { region } = env;
+
+    const STACK_NAME = cdk.Stack.of(this).stackName;
+    const STACK_ID = cdk.Stack.of(this).stackId;
+    // Pin the instance to the first AZ of the stack's environment.
+    const availabilityZones = cdk.Stack.of(this).availabilityZones;
+    const chosenAvailabilityZone = availabilityZones.slice(0, 1)[0];
+
+    // Using default VPC
+    const vpc = ec2.Vpc.fromLookup(this, "vpc", { isDefault: true });
+
+    // Setting up the security group for the node from the Allora-specific construct
+    const instanceSG = new NodeSecurityGroupConstruct (this, "security-group", {
+      vpc: vpc,
+    })
+
+    // Making our scripts and configs from the local "assets" directory available for instance to download
+    const asset = new s3Assets.Asset(this, "assets", {
+      path: path.join(__dirname, "assets"),
+    });
+
+    // Getting the instance IAM role ARN exported by the common stack
+    const importedInstanceRoleArn = cdk.Fn.importValue("EdgeNodeInstanceRoleArn");
+
+    const instanceRole = iam.Role.fromRoleArn(this, "iam-role", importedInstanceRoleArn);
+
+    // Making sure our instance will be able to read the assets
+    asset.bucket.grantRead(instanceRole);
+
+    // Setting up the node using the generic Single Node construct
+    const node = new SingleNodeConstruct(this, "single-node", {
+      instanceName: STACK_NAME,
+      instanceType,
+      dataVolumes: dataVolumes,
+      machineImage: new ec2.AmazonLinuxImage({
+        generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2023,
+        kernel:ec2.AmazonLinuxKernel.KERNEL6_1,
+        cpuType: instanceCpuType,
+      }),
+      vpc,
+      availabilityZone: chosenAvailabilityZone,
+      role: instanceRole,
+      securityGroup: instanceSG.securityGroup,
+      vpcSubnets: {
+        subnetType: ec2.SubnetType.PUBLIC,
+      },
+    });
+
+    const instance = node.instance;
+
+    // Read user data script and inject variables. Each _KEY_ placeholder below
+    // matches a ${_KEY_} reference in assets/user-data-alinux.sh.
+    const userData = fs.readFileSync(path.join(__dirname, 'assets', 'user-data-alinux.sh')).toString();
+    const modifiedUserData = cdk.Fn.sub(userData, {
+      _AWS_REGION_: region,
+      _ASSETS_S3_PATH_: `s3://${asset.s3BucketName}/${asset.s3ObjectKey}`,
+      _NODE_CF_LOGICAL_ID_: node.nodeCFLogicalId,
+      _STACK_NAME_: STACK_NAME,
+      _STACK_ID_: STACK_ID,
+      _ALLORA_WORKER_NAME_: alloraWorkerName,
+      _ALLORA_ENV_: alloraEnv,
+      _MODEL_REPO_: modelRepo,
+      _MODEL_ENV_VARS_: modelEnvVars,
+
+      //wallet config
+      _ALLORA_WALLET_ADDRESS_KEY_NAME_ : alloraWalletAddressKeyName,
+      _ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC_ : alloraWalletAddressRestoreMnemonic,
+      _ALLORA_WALLET_HOME_DIR_: alloraWalletHomeDir,
+      _ALLORA_WALLET_GAS_ADJUSTMENT_: alloraWalletGasAdjustment,
+      _ALLORA_WALLET_GAS_: alloraWalletGas,
+
+      _ALLORA_WALLET_GAS_PRICES_: alloraWalletGasPrices,
+      _ALLORA_WALLET_GAS_PRICE_INTERVAL_: alloraWalletGasPriceInterval,
+      _ALLORA_WALLET_RETRY_DELAY_: alloraWalletRetryDelay,
+      _ALLORA_WALLET_BLOCK_DURATION_ESTIMATED_: alloraWalletBlockDurationEstimated,
+      _ALLORA_WALLET_WINDOW_CORRECTION_FACTOR_: alloraWalletWindowCorrectionFactor,
+      _ALLORA_WALLET_MAX_FEES_: alloraWalletMaxFees,
+      _ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY_: alloraWalletAccountSequenceRetryDelay,
+
+      _ALLORA_WALLET_NODE_RPC_: alloraWalletNodeRpc,
+      _ALLORA_WALLET_MAX_RETRIES_: alloraWalletMaxRetries,
+      _ALLORA_WALLET_DELAY_: alloraWalletDelay,
+      _ALLORA_WALLET_SUBMIT_TX_: alloraWalletSubmitTx,
+
+      //worker config
+      _ALLORA_WORKER_TOPIC_ID_: alloraWorkerTopicId,
+      _ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME_: alloraWorkerInferenceEntrypointName,
+      _ALLORA_WORKER_INFERENCE_ENDPOINT_: alloraWorkerInferenceEndpoint,
+      _ALLORA_WORKER_LOOP_SECONDS_: alloraWorkerLoopSeconds,
+      _ALLORA_WORKER_TOKEN_: alloraWorkerToken,
+
+      //reputer config
+      _ALLORA_REPUTER_TOPIC_ID_: alloraReputerTopicId,
+      _ALLORA_REPUTER_ENTRYPOINT_NAME_: alloraReputerEntrypointName,
+      _ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT_: alloraReputerSourceOfTruthEndpoint,
+
+      _ALLORA_REPUTER_LOSS_FUNCTION_SERVICE_: alloraReputerLossFunctionService,
+      _ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD_: alloraReputerLossMethodOptionsLossMethod,
+
+      _ALLORA_REPUTER_LOOP_SECONDS_: alloraReputerLoopSeconds,
+      _ALLORA_REPUTER_TOKEN_: alloraReputerToken,
+      _ALLORA_REPUTER_MIN_STAKE_: alloraReputerMinStake,
+
+
+    });
+
+    instance.addUserData(modifiedUserData);
+
+    // Render the dashboard template, resolving its ${...} placeholders.
+    const dashboardString = cdk.Fn.sub(JSON.stringify(nodeCwDashboard.SingleNodeCWDashboardJSON), {
+      INSTANCE_ID: node.instanceId,
+      INSTANCE_NAME: `${resourceNamePrefix}Instance`,
+      REGION: region,
+    });
+
+    new cw.CfnDashboard(this, 'single-cw-dashboard', {
+      dashboardName: `AlloraStack-${node.instanceId}`,
+      dashboardBody: dashboardString,
+    });
+
+    new cdk.CfnOutput(this, "node-instance-id", {
+      value: node.instanceId,
+    });
+
+    // NOTE(review): these blanket, stack-wide suppressions use generic reasons;
+    // consider scoping them to the specific resources that need them.
+    nag.NagSuppressions.addResourceSuppressions(
+      this,
+      [
+        {
+id: "AwsSolutions-EC23",
+          reason: "Inbound access from any IP is required for this application.",
+        },
+        {
+          id: "AwsSolutions-IAM4",
+          reason: "This IAM role requires broad permissions to function correctly.",
+        },
+        {
+          id: "AwsSolutions-IAM5",
+          reason: "Full access is needed for administrative tasks.",
+        },
+        {
+          id: "AwsSolutions-EC2",
+          reason: "Unrestricted access is required for the instance to operate correctly.",
+        },
+      ],
+      true
+    );
+  }
+}
diff --git a/lib/allora/package.json b/lib/allora/package.json
new file mode 100644
index 00000000..8a130c9a
--- /dev/null
+++ b/lib/allora/package.json
@@ -0,0 +1,13 @@
+{
+ "name": "allora",
+ "version": "0.1.0",
+ "bin": {
+ "run": "app.js"
+ },
+ "scripts": {
+ "build": "tsc",
+ "watch": "tsc -w",
+ "test": "jest",
+ "cdk": "cdk"
+ }
+}
diff --git a/lib/allora/sample-configs/.env-sample-full b/lib/allora/sample-configs/.env-sample-full
new file mode 100644
index 00000000..ff348bf9
--- /dev/null
+++ b/lib/allora/sample-configs/.env-sample-full
@@ -0,0 +1,72 @@
+#allora node run config
+
+# Must fill all these in
+AWS_ACCOUNT_ID="xxxxxxxxxxx"
+AWS_REGION="us-east-1"
+
+# # Allora configuration starts here
+ALLORA_WALLET_ADDRESS_KEY_NAME="xxxxxxxxxxx"
+ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC="xxxxxxxxxxx" #if you don't have this, generate with allocmd
+MODEL_REPO="https://github.com/allora-network/basic-coin-prediction-node"
+MODEL_ENV_VARS='
+TOKEN="ETH"
+TRAINING_DAYS="1"
+TIMEFRAME="4h"
+MODEL="LinearRegression"
+REGION="US"
+DATA_PROVIDER="coingecko"
+CG_API_KEY="secret"
+'
+###############################
+
+# AWS_RESOURCE_NAME_PREFIX="AlloraWorkerx"
+AWS_INSTANCE_TYPE="t3.medium"
+AWS_INSTANCE_CPU_TYPE="x86_64" # All options: "x86_64", "ARM_64". IMPORTANT: Make sure the CPU type matches the instance type used
+
+# Data volume configuration
+EDGE_DATA_VOL_TYPE="gp3" # Other options: "io1" | "io2" | "gp3" | "instance-store" . IMPORTANT: "instance-store" is NOT recommended as it is ephemeral and will be reset after stopping the instance. Use the "instance-store" option only with instance types that support that feature, like the popular g4dn, d3, i3en, and i4i instance families
+EDGE_DATA_VOL_SIZE="256" # Current required data size to keep both snapshot archive and unarchived version of it (not applicable for "instance-store")
+EDGE_DATA_VOL_IOPS="3000" # Max IOPS for EBS volumes (not applicable for "instance-store")
+EDGE_DATA_VOL_THROUGHPUT="125" # Max throughput for EBS gp3 volumes (not applicable for "io1" | "io2" | "instance-store")
+
+# # Allora Node Worker Generation Config
+# ALLORA_WORKER_NAME="aws"
+# ALLORA_ENV="dev"
+
+# # Wallet config
+# ALLORA_WALLET_HOME_DIR=""
+# ALLORA_WALLET_GAS="1000000"
+# ALLORA_WALLET_GAS_ADJUSTMENT="1.0"
+
+# ALLORA_WALLET_GAS_PRICES="auto"
+# ALLORA_WALLET_GAS_PRICE_INTERVAL="60"
+# ALLORA_WALLET_RETRY_DELAY="3"
+# ALLORA_WALLET_BLOCK_DURATION_ESTIMATED="10"
+# ALLORA_WALLET_WINDOW_CORRECTION_FACTOR="0.8"
+# ALLORA_WALLET_MAX_FEES="500000"
+# ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY="5"
+
+ALLORA_WALLET_NODE_RPC="https://allora-rpc.testnet.allora.network"
+
+# ALLORA_WALLET_MAX_RETRIES="1"
+# ALLORA_WALLET_DELAY="1"
+# ALLORA_WALLET_SUBMIT_TX="false"
+
+# # Worker config
+# ALLORA_WORKER_TOPIC_ID="1"
+# ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME="api-worker-reputer"
+# ALLORA_WORKER_INFERENCE_ENDPOINT="http://source:8000/inference/{Token}"
+
+# ALLORA_REPUTER_LOSS_FUNCTION_SERVICE="http://localhost:5000"
+# ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD="sqe"
+
+# ALLORA_WORKER_LOOP_SECONDS="30"
+# ALLORA_WORKER_TOKEN="ethereum"
+
+# # Reputer config
+# ALLORA_REPUTER_TOPIC_ID="1"
+# ALLORA_REPUTER_ENTRYPOINT_NAME="api-worker-reputer"
+# ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT="http://source:8888/truth/{Token}/{BlockHeight}"
+# ALLORA_REPUTER_LOOP_SECONDS="30"
+# ALLORA_REPUTER_TOKEN="ethereum"
+# ALLORA_REPUTER_MIN_STAKE="100000"
diff --git a/lib/allora/test/.env-test b/lib/allora/test/.env-test
new file mode 100644
index 00000000..eddd7311
--- /dev/null
+++ b/lib/allora/test/.env-test
@@ -0,0 +1,73 @@
+#allora node run config
+
+# Must fill all these in
+AWS_ACCOUNT_ID="xxxxxxxxxxx"
+AWS_REGION="us-east-1"
+
+# # Allora configuration starts here
+# ALLORA_WALLET_ADDRESS_KEY_NAME="xxxxxxxxxxx"
+# ALLORA_WALLET_ADDRESS_RESTORE_MNEMONIC="xxxxxxxxxxx" #if you don't have this, generate with allocmd
+# AWS_ACCOUNT_ID="xxxxxxxxxxx"
+# MODEL_REPO="https://github.com/allora-network/basic-coin-prediction-node"
+# MODEL_ENV_VARS='
+# TOKEN="ETH"
+# TRAINING_DAYS="1"
+# TIMEFRAME="4h"
+# MODEL="LinearRegression"
+# REGION="US"
+# DATA_PROVIDER="coingecko"
+# CG_API_KEY="secret"
+# '
+###############################
+
+# AWS_RESOURCE_NAME_PREFIX="AlloraWorkerx"
+AWS_INSTANCE_TYPE="t3.medium"
+AWS_INSTANCE_CPU_TYPE="x86_64" # All options: "x86_64", "ARM_64". IMPORTANT: Make sure the CPU type matches the instance type used
+
+# Data volume configuration
+EDGE_DATA_VOL_TYPE="gp3" # Other options: "io1" | "io2" | "gp3" | "instance-store" . IMPORTANT: "instance-store" is NOT recommended as it is ephemeral and will be reset after stopping the instance. Use the "instance-store" option only with instance types that support that feature, like the popular g4dn, d3, i3en, and i4i instance families
+EDGE_DATA_VOL_SIZE="256" # Current required data size to keep both snapshot archive and unarchived version of it (not applicable for "instance-store")
+EDGE_DATA_VOL_IOPS="3000" # Max IOPS for EBS volumes (not applicable for "instance-store")
+EDGE_DATA_VOL_THROUGHPUT="125" # Max throughput for EBS gp3 volumes (not applicable for "io1" | "io2" | "instance-store")
+
+# # Allora Node Worker Generation Config
+# ALLORA_WORKER_NAME="aws"
+# ALLORA_ENV="dev"
+
+# # Wallet config
+# ALLORA_WALLET_HOME_DIR=""
+# ALLORA_WALLET_GAS="1000000"
+# ALLORA_WALLET_GAS_ADJUSTMENT="1.0"
+
+# ALLORA_WALLET_GAS_PRICES="auto"
+# ALLORA_WALLET_GAS_PRICE_INTERVAL="60"
+# ALLORA_WALLET_RETRY_DELAY="3"
+# ALLORA_WALLET_BLOCK_DURATION_ESTIMATED="10"
+# ALLORA_WALLET_WINDOW_CORRECTION_FACTOR="0.8"
+# ALLORA_WALLET_MAX_FEES="500000"
+# ALLORA_WALLET_ACCOUNT_SEQUENCE_RETRY_DELAY="5"
+
+ALLORA_WALLET_NODE_RPC="https://allora-rpc.testnet.allora.network"
+
+# ALLORA_WALLET_MAX_RETRIES="1"
+# ALLORA_WALLET_DELAY="1"
+# ALLORA_WALLET_SUBMIT_TX="false"
+
+# # Worker config
+# ALLORA_WORKER_TOPIC_ID="1"
+# ALLORA_WORKER_INFERENCE_ENTRYPOINT_NAME="api-worker-reputer"
+# ALLORA_WORKER_INFERENCE_ENDPOINT="http://source:8000/inference/{Token}"
+
+# ALLORA_REPUTER_LOSS_FUNCTION_SERVICE="http://localhost:5000"
+# ALLORA_REPUTER_LOSS_METHOD_OPTIONS_LOSS_METHOD="sqe"
+
+# ALLORA_WORKER_LOOP_SECONDS="30"
+# ALLORA_WORKER_TOKEN="ethereum"
+
+# # Reputer config
+# ALLORA_REPUTER_TOPIC_ID="1"
+# ALLORA_REPUTER_ENTRYPOINT_NAME="api-worker-reputer"
+# ALLORA_REPUTER_SOURCE_OF_TRUTH_ENDPOINT="http://source:8888/truth/{Token}/{BlockHeight}"
+# ALLORA_REPUTER_LOOP_SECONDS="30"
+# ALLORA_REPUTER_TOKEN="ethereum"
+# ALLORA_REPUTER_MIN_STAKE="100000"
diff --git a/lib/allora/test/allora-worker.test.ts b/lib/allora/test/allora-worker.test.ts
new file mode 100644
index 00000000..a32dbb30
--- /dev/null
+++ b/lib/allora/test/allora-worker.test.ts
@@ -0,0 +1,83 @@
+import * as cdk from 'aws-cdk-lib';
+import { Template, Match } from 'aws-cdk-lib/assertions';
+import { AlloraStack } from '../lib/single-node-stack';
+import * as dotenv from 'dotenv';
+dotenv.config({ path: './test/.env-test' });
+import { baseConfig, singleNodeConfig } from '../lib/config/node-config';
+
+describe("AlloranodeStack", () => {
+ test('Stack has correct resources', () => {
+ const app = new cdk.App();
+ const stack = new AlloraStack(app, 'TestStack', {
+ stackName: 'allora-single-node',
+ env: {
+ account: baseConfig.accountId,
+ region: baseConfig.region
+ },
+ ...singleNodeConfig
+ });
+
+ const template = Template.fromStack(stack);
+
+ // Check for Security Group with inbound TCP 9010 rule
+ template.hasResourceProperties('AWS::EC2::SecurityGroup', {
+ SecurityGroupEgress: [
+ {
+ CidrIp: "0.0.0.0/0",
+ Description: "Allow all outbound traffic by default",
+ IpProtocol: "-1"
+ }
+ ],
+ SecurityGroupIngress: [
+ {
+ CidrIp: "1.2.3.4/5",
+ Description: "ALLORA Offchain Source",
+ FromPort: 8000,
+ IpProtocol: "tcp",
+ ToPort: 8000
+ }
+ ],
+ });
+
+ // Check for EC2 Instance
+ template.hasResourceProperties('AWS::EC2::Instance', {
+ InstanceType: 't3.medium',
+ ImageId: Match.anyValue(),
+ BlockDeviceMappings: [
+ {
+ DeviceName: '/dev/xvda',
+ Ebs: {
+ VolumeSize: 46,
+ VolumeType: 'gp3',
+ },
+ },
+ ],
+ });
+
+ // Has EBS data volume.
+ template.hasResourceProperties("AWS::EC2::Volume", {
+ AvailabilityZone: Match.anyValue(),
+ Encrypted: true,
+ Iops: 3000,
+ MultiAttachEnabled: false,
+ Size: 256,
+ Throughput: 125,
+ VolumeType: "gp3"
+ })
+
+ // Has EBS data volume attachment.
+ template.hasResourceProperties("AWS::EC2::VolumeAttachment", {
+ Device: "/dev/sdf",
+ InstanceId: Match.anyValue(),
+ VolumeId: Match.anyValue(),
+ })
+
+ // Has CloudWatch dashboard.
+ template.hasResourceProperties("AWS::CloudWatch::Dashboard", {
+ DashboardBody: Match.anyValue(),
+ DashboardName: {
+ "Fn::Join": ["", ["AlloraStack-", { "Ref": Match.anyValue() }]]
+ }
+ })
+ });
+});
diff --git a/lib/allora/test/common-stack.test.ts b/lib/allora/test/common-stack.test.ts
new file mode 100644
index 00000000..91cb25a0
--- /dev/null
+++ b/lib/allora/test/common-stack.test.ts
@@ -0,0 +1,63 @@
+import { Match, Template } from "aws-cdk-lib/assertions";
+import * as cdk from "aws-cdk-lib";
+import * as dotenv from 'dotenv';
+dotenv.config({ path: './test/.env-test' });
+import * as config from "../lib/config/node-config";
+import { AlloraCommonStack } from "../lib/common-stack";
+
+describe("AlloraCommonStack", () => {
+ test("synthesizes the way we expect", () => {
+ const app = new cdk.App();
+
+ // Create the AlloraCommonStack.
+ const alloraCommonStack = new AlloraCommonStack(app, "allora-common", {
+ env: { account: config.baseConfig.accountId, region: config.baseConfig.region },
+      stackName: `allora-nodes-common`,
+ });
+
+ // Prepare the stack for assertions.
+ const template = Template.fromStack(alloraCommonStack);
+
+ // Has EC2 instance role.
+ template.hasResourceProperties("AWS::IAM::Role", {
+ AssumeRolePolicyDocument: {
+ Statement: [
+ {
+ Action: "sts:AssumeRole",
+ Effect: "Allow",
+ Principal: {
+ Service: "ec2.amazonaws.com"
+ }
+ }
+ ]
+ },
+ ManagedPolicyArns: [
+ {
+ "Fn::Join": [
+ "",
+ [
+ "arn:",
+ {
+ Ref: "AWS::Partition"
+ },
+ ":iam::aws:policy/AmazonSSMManagedInstanceCore"
+ ]
+ ]
+ },
+ {
+ "Fn::Join": [
+ "",
+ [
+ "arn:",
+ {
+ "Ref": "AWS::Partition"
+ },
+ ":iam::aws:policy/CloudWatchAgentServerPolicy"
+ ]
+ ]
+ }
+ ]
+ })
+
+ });
+});
diff --git a/lib/allora/tsconfig.json b/lib/allora/tsconfig.json
new file mode 100644
index 00000000..8e1979f3
--- /dev/null
+++ b/lib/allora/tsconfig.json
@@ -0,0 +1,31 @@
+{
+ "compilerOptions": {
+ "target": "ES2020",
+ "module": "commonjs",
+ "lib": [
+ "es2020",
+ "dom"
+ ],
+ "declaration": true,
+ "strict": true,
+ "noImplicitAny": true,
+ "strictNullChecks": true,
+ "noImplicitThis": true,
+ "alwaysStrict": true,
+ "noUnusedLocals": false,
+ "noUnusedParameters": false,
+ "noImplicitReturns": true,
+ "noFallthroughCasesInSwitch": false,
+ "inlineSourceMap": true,
+ "inlineSources": true,
+ "experimentalDecorators": true,
+ "strictPropertyInitialization": false,
+ "typeRoots": [
+ "../../node_modules/@types"
+ ]
+ },
+ "exclude": [
+ "node_modules",
+ "cdk.out"
+ ]
+}
diff --git a/website/docs/Blueprints/Allora.md b/website/docs/Blueprints/Allora.md
new file mode 100644
index 00000000..4945273f
--- /dev/null
+++ b/website/docs/Blueprints/Allora.md
@@ -0,0 +1,9 @@
+---
+sidebar_position: 10
+sidebar_label: Allora
+---
+#
+
+import Readme from '../../../lib/allora/README.md';
+
+<Readme />