diff --git a/.github/workflows/last-reviewed-backfill.yml b/.github/workflows/last-reviewed-backfill.yml new file mode 100644 index 0000000000..7dc8722df7 --- /dev/null +++ b/.github/workflows/last-reviewed-backfill.yml @@ -0,0 +1,125 @@ +name: Last Reviewed Date Backfill (One Time) + +on: + workflow_dispatch: + inputs: + dry_run: + description: "Log actions only (no writes)" + type: boolean + default: true + +permissions: + contents: read + pull-requests: read + repository-projects: write + +jobs: + backfill: + runs-on: ubuntu-latest + steps: + - name: Backfill Last Reviewed Date + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.PROJECT_TOKEN }} + script: | + const dryRun = core.getInput('dry_run') === 'true'; + + const orgLogin = 'ArmDeveloperEcosystem'; + const projectNumber = 4; + + const ISO_CUTOFF = '2024-12-31'; + const toDate = (iso) => new Date(iso + 'T00:00:00.000Z'); + + // 1) project + const proj = await github.graphql( + `query($org:String!,$num:Int!){ organization(login:$org){ projectV2(number:$num){ id } } }`, + { org: orgLogin, num: projectNumber } + ); + const projectId = proj.organization?.projectV2?.id; + if (!projectId) throw new Error('Project not found'); + + // 2) fields + const fields = (await github.graphql( + `query($id:ID!){ node(id:$id){ ... on ProjectV2 { + fields(first:50){ nodes{ + __typename + ... on ProjectV2Field{ id name dataType } + ... 
on ProjectV2SingleSelectField{ id name options{ id name } } + } } } } }`, { id: projectId } + )).node.fields.nodes; + + const dateFieldId = (n)=>fields.find(f=>f.__typename==='ProjectV2Field'&&f.name===n&&f.dataType==='DATE')?.id||null; + const statusField = fields.find(f=>f.__typename==='ProjectV2SingleSelectField' && f.name==='Status'); + const statusFieldId = statusField?.id; + const publishId = dateFieldId('Publish Date'); + const lrdId = dateFieldId('Last Reviewed Date'); + + if (!statusFieldId || !lrdId) throw new Error('Missing Status or Last Reviewed Date field'); + + // writers + const setDate = async (itemId, fieldId, iso) => { + if (dryRun) return console.log(`[DRY RUN] setDate item=${itemId} -> ${iso}`); + const m = `mutation($p:ID!,$i:ID!,$f:ID!,$d:Date!){ + updateProjectV2ItemFieldValue(input:{projectId:$p,itemId:$i,fieldId:$f,value:{date:$d}}){ + projectV2Item{ id } + }}`; + await github.graphql(m, { p: projectId, i: itemId, f: fieldId, d: iso }); + }; + + // helpers + const getDate = (item,id)=>item.fieldValues.nodes.find(n=>n.__typename==='ProjectV2ItemFieldDateValue'&&n.field?.id===id)?.date||null; + const getStatus = (item)=>{ const n=item.fieldValues.nodes.find(n=>n.__typename==='ProjectV2ItemFieldSingleSelectValue'&&n.field?.id===statusFieldId); return n?.name||null; }; + + // iterate + async function* items(){ let cursor=null; for(;;){ + const r=await github.graphql( + `query($org:String!,$num:Int!,$after:String){ + organization(login:$org){ projectV2(number:$num){ + items(first:100, after:$after){ + nodes{ + id + content{ __typename ... on PullRequest{ number repository{ name } } } + fieldValues(first:50){ nodes{ + __typename + ... on ProjectV2ItemFieldDateValue{ field{ ... on ProjectV2Field{ id name } } date } + ... on ProjectV2ItemFieldSingleSelectValue{ field{ ... 
on ProjectV2SingleSelectField{ id name } } name optionId } + } } + } + pageInfo{ hasNextPage endCursor } + } + } } }`, + { org: orgLogin, num: projectNumber, after: cursor } + ); + const page=r.organization.projectV2.items; + for(const n of page.nodes) yield n; + if(!page.pageInfo.hasNextPage) break; + cursor=page.pageInfo.endCursor; + } } + + let updates=0; + for await (const item of items()){ + if (item.content?.__typename !== 'PullRequest') continue; + + const status = getStatus(item); + if (status !== 'Done' && status !== 'Maintenance') continue; + + const lrd = getDate(item, lrdId); + if (lrd) continue; // already has a value + + if (status === 'Done') { + const publish = publishId ? getDate(item, publishId) : null; + if (publish) { + await setDate(item.id, lrdId, publish); + updates++; console.log(`[Backfill][Done] Set LRD=${publish}`); + } else { + console.log(`[Skip][Done] No Publish Date; not setting LRD`); + } + } + + if (status === 'Maintenance') { + await setDate(item.id, lrdId, ISO_CUTOFF); + updates++; console.log(`[Backfill][Maintenance] Set LRD=${ISO_CUTOFF}`); + } + } + + console.log(`Backfill complete. Items updated: ${updates}. 
Dry run: ${dryRun}`); \ No newline at end of file diff --git a/.github/workflows/last-reviewed-cron.yml b/.github/workflows/last-reviewed-cron.yml new file mode 100644 index 0000000000..c78a8a9dbd --- /dev/null +++ b/.github/workflows/last-reviewed-cron.yml @@ -0,0 +1,254 @@ +name: Last Reviewed Cron + +on: + schedule: + - cron: "0 9 * * *" # daily at 09:00 UTC + workflow_dispatch: + inputs: + dry_run: + description: "Log actions only (no writes)" + type: boolean + default: false + +permissions: + contents: read + pull-requests: read + repository-projects: write + +jobs: + sweep: + runs-on: ubuntu-latest + steps: + - name: Move items based on Last Reviewed Date + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.PROJECT_TOKEN }} + script: | + // Inputs + const dryRun = core.getInput('dry_run') === 'true'; + + // ---- Config (edit if needed) ---- + const orgLogin = 'ArmDeveloperEcosystem'; + const projectNumber = 4; + const STATUS_FIELD_NAME = 'Status'; + const STATUS_DONE = 'Done'; + const STATUS_MAINT = 'Maintenance'; + const LRD_FIELD_NAME = 'Last Reviewed Date'; + const PUBLISHED_URL_FIELD_NAME = 'Published URL'; + // ---------------------------------- + + // Dates + const TODAY = new Date(); + const sixMonthsAgoISO = (() => { + const d = new Date(TODAY); + d.setUTCMonth(d.getUTCMonth() - 6); + return d.toISOString().split('T')[0]; + })(); + const toDate = (iso) => new Date(iso + 'T00:00:00.000Z'); + + // Project + const proj = await github.graphql( + `query($org:String!,$num:Int!){ + organization(login:$org){ + projectV2(number:$num){ id } + } + }`, + { org: orgLogin, num: projectNumber } + ); + const projectId = proj.organization?.projectV2?.id; + if (!projectId) throw new Error('Project not found'); + + // Fields + const fields = (await github.graphql( + `query($id:ID!){ + node(id:$id){ + ... on ProjectV2 { + fields(first:50){ + nodes{ + __typename + ... on ProjectV2Field { id name dataType } + ... 
on ProjectV2SingleSelectField { id name options { id name } } + } + } + } + } + }`, { id: projectId } + )).node.fields.nodes; + + const findDateFieldId = (name) => + fields.find(f => f.__typename === 'ProjectV2Field' && f.name === name && f.dataType === 'DATE')?.id || null; + + const findTextFieldId = (name) => { + const exact = fields.find(f => f.__typename === 'ProjectV2Field' && f.name === name && f.dataType === 'TEXT'); + if (exact) return exact.id; + const ci = fields.find(f => f.__typename === 'ProjectV2Field' && (f.name?.toLowerCase?.() === name.toLowerCase()) && f.dataType === 'TEXT'); + return ci?.id || null; + }; + + const statusField = fields.find(f => f.__typename === 'ProjectV2SingleSelectField' && f.name === STATUS_FIELD_NAME); + const statusFieldId = statusField?.id || null; + const doneId = statusField?.options?.find(o => o.name === STATUS_DONE)?.id || null; + const maintId = statusField?.options?.find(o => o.name === STATUS_MAINT)?.id || null; + + const lrdId = findDateFieldId(LRD_FIELD_NAME); + const publishedUrlFieldId = findTextFieldId(PUBLISHED_URL_FIELD_NAME); + + if (!statusFieldId || !doneId || !maintId || !lrdId) { + throw new Error('Missing required project fields/options: Status/Done/Maintenance or Last Reviewed Date.'); + } + + // Helpers + const getDate = (item, fieldId) => + item.fieldValues.nodes.find(n => + n.__typename === 'ProjectV2ItemFieldDateValue' && n.field?.id === fieldId + )?.date || null; + + const getText = (item, fieldId) => + item.fieldValues.nodes.find(n => + n.__typename === 'ProjectV2ItemFieldTextValue' && n.field?.id === fieldId + )?.text || null; + + const getStatusName = (item) => { + const n = item.fieldValues.nodes.find(n => + n.__typename === 'ProjectV2ItemFieldSingleSelectValue' && n.field?.id === statusFieldId + ); + return n?.name || null; + }; + + const setStatus = async (itemId, fieldId, optionId) => { + if (dryRun) { + console.log(`[DRY RUN] setStatus item=${itemId} -> option=${optionId}`); + return; + } + 
const m = ` + mutation($p:ID!,$i:ID!,$f:ID!,$o:String!){ + updateProjectV2ItemFieldValue(input:{ + projectId:$p, itemId:$i, fieldId:$f, value:{ singleSelectOptionId:$o } + }){ + projectV2Item { id } + } + }`; + await github.graphql(m, { p: projectId, i: itemId, f: fieldId, o: optionId }); + }; + + async function* iterItems() { + let cursor = null; + for (;;) { + const r = await github.graphql( + `query($org:String!,$num:Int!,$after:String){ + organization(login:$org){ + projectV2(number:$num){ + items(first:100, after:$after){ + nodes{ + id + content{ + __typename + ... on PullRequest { + number + repository { name } + } + } + fieldValues(first:100){ + nodes{ + __typename + ... on ProjectV2ItemFieldDateValue { + field { ... on ProjectV2Field { id name } } + date + } + ... on ProjectV2ItemFieldTextValue { + field { ... on ProjectV2Field { id name } } + text + } + ... on ProjectV2ItemFieldSingleSelectValue { + field { ... on ProjectV2SingleSelectField { id name } } + name + optionId + } + } + } + } + pageInfo { + hasNextPage + endCursor + } + } + } + } + }`, + { org: orgLogin, num: projectNumber, after: cursor } + ); + + const page = r.organization.projectV2.items; + for (const n of page.nodes) yield n; + if (!page.pageInfo.hasNextPage) break; + cursor = page.pageInfo.endCursor; + } + } + + const lastTwoFromUrl = (url) => { + if (!url) return ''; + try { + const u = new URL(url); + const segs = u.pathname.split('/').filter(Boolean); + if (segs.length >= 2) return `${segs[segs.length - 2]}/${segs[segs.length - 1]}/`; + if (segs.length === 1) return `${segs[0]}/`; + return ''; + } catch { return ''; } + }; + + // Movement counters & log + let movedDoneToMaint = 0; + let movedMaintToDone = 0; + const moveLog = []; + + // Sweep + for await (const item of iterItems()) { + if (item.content?.__typename !== 'PullRequest') continue; // PRs only + + const itemId = item.id; + const status = getStatusName(item); + const lrd = getDate(item, lrdId); + if (!status || !lrd) 
continue; // only move when LRD exists + + const prNumber = item.content.number; + const repoName = item.content.repository.name; + + const publishedUrl = publishedUrlFieldId ? getText(item, publishedUrlFieldId) : null; + const lastTwoSegments = lastTwoFromUrl(publishedUrl) || '(no-published-url)'; + + // Done -> Maintenance: LRD older/equal than 6 months ago + if (status === STATUS_DONE && toDate(lrd) <= toDate(sixMonthsAgoISO)) { + await setStatus(itemId, statusFieldId, maintId); + movedDoneToMaint++; + const line = `[Cron] Moved ${lastTwoSegments} → Maintenance (LRD ${lrd} ≤ ${sixMonthsAgoISO})`; + console.log(line); + moveLog.push(line); + continue; // skip second rule for same item + } + + // Maintenance -> Done: LRD within last 6 months (strictly newer than threshold) + if (status === STATUS_MAINT && toDate(lrd) > toDate(sixMonthsAgoISO)) { + await setStatus(itemId, statusFieldId, doneId); + movedMaintToDone++; + const line = `[Cron] Moved ${lastTwoSegments} → Done (LRD ${lrd} > ${sixMonthsAgoISO})`; + console.log(line); + moveLog.push(line); + } + } + + // Summary + const totalMoves = movedDoneToMaint + movedMaintToDone; + console.log(`Cron complete. Moved Done→Maintenance: ${movedDoneToMaint}, Maintenance→Done: ${movedMaintToDone}, Total: ${totalMoves}. 
Dry run: ${dryRun}`); + + // Nice Job Summary in the Actions UI + await core.summary + .addHeading('Last Reviewed Cron Summary') + .addTable([ + [{ data: 'Direction', header: true }, { data: 'Count', header: true }], + ['Done → Maintenance', String(movedDoneToMaint)], + ['Maintenance → Done', String(movedMaintToDone)], + ['Total moves', String(totalMoves)], + ]) + .addHeading('Details', 2) + .addCodeBlock(moveLog.join('\n') || 'No moves', 'text') + .write(); diff --git a/.wordlist.txt b/.wordlist.txt index 6c185d8c42..85a6e42060 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -5039,4 +5039,19 @@ vesion wwwrun xdebug zoneIdentifier -zypper \ No newline at end of file +zypper +keyspace +Keyspace +CQL +cqlsh +keyspaces +CQLSH +SAI +SSTables +Trie +UCS +memtables +cassandra +Cassandra's +Cassandra +CircleCI diff --git a/content/install-guides/bolt.md b/content/install-guides/bolt.md index 1878f388ca..e9b6c7cff1 100644 --- a/content/install-guides/bolt.md +++ b/content/install-guides/bolt.md @@ -29,6 +29,11 @@ BOLT is an open-source post-link binary optimization tool developed to speed up This article provides quick instructions to download and install BOLT. The instructions are for Debian-based Linux distributions, but can be adapted for other Linux distributions. +{{% notice Note %}} +BOLT is provided as a built-in, ready-to-use component of the [Arm Toolchain for Linux](https://developer.arm.com/documentation/110477) suite. For more +information refer to [this guide](https://developer.arm.com/documentation/110477/211/How-to-use-BOLT-with-our-toolchain). +{{% /notice %}} + 1. Install Git [Install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) using the documentation for your operating system. 
diff --git a/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md b/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md index 54af0efaf1..949ce01844 100644 --- a/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md +++ b/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md @@ -5,7 +5,6 @@ minutes_to_complete: 120 who_is_this_for: This is an introductory topic for software developers learning about dynamic memory allocation for the first time, and who may have used malloc and free in C programming. It also provides a starting point to explore more advanced memory allocation topics. -layout: learningpathall learning_objectives: - Explain how dynamic memory allocation and the C heap works - Write a simple dynamic memory allocator @@ -56,4 +55,4 @@ shared_between: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. ---- \ No newline at end of file +--- diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md index 2c2c7ce60b..981fbc3c3a 100644 --- a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md +++ b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md @@ -21,7 +21,7 @@ This section walks you through the required tools and two supported setup option ## Download and explore the code examples -To get started, begin by [downloading the code examples](https://gitlab.arm.com/learning-code-examples/code-examples/-/archive/main/code-examples-main.tar.gz?path=learning-paths/cross-platform/multiplying-matrices-with-sme2). 
+To get started, begin by [downloading the code examples](https://gitlab.arm.com/learning-code-examples/code-examples/-/archive/d41190c0cf962f778ae71b94adf5330033019aed/code-examples-d41190c0cf962f778ae71b94adf5330033019aed.tar.gz?path=learning-paths/cross-platform/multiplying-matrices-with-sme2). Now extract the archive, and change directory to: ``code-examples/learning-paths/cross-platform/multiplying-matrices-with-sme2.`` @@ -68,7 +68,7 @@ Among other files, it includes: - `run-fvp.sh` to run the FVP model. - A `docker` directory containing: - `assets.source_me` to provide toolchain paths. - - `build-my-container.sh`, a script that automates building the Docker image from the `sme2-environment.docker` file. It runs the Docker build command with the correct arguments so you don’t have to remember them. + - `build-my-container.sh`, a script that automates building the Docker image from the `sme2-environment.docker` file. It runs the Docker build command with the correct arguments so you don’t have to remember them. - `sme2-environment.docker`, a custom Docker file that defines the steps to build the SME2 container image. It installs all the necessary dependencies, including the SME2-compatible compiler and Arm FVP emulator. - `build-all-containers.sh`, a script to build multi-architecture images. - `.devcontainer/devcontainer.json` for VS Code container support. @@ -113,7 +113,7 @@ If your machine doesn't support SME2, or you want to emulate it, you can use the The Docker container includes both a compiler and [Arm's Fixed Virtual Platform (FVP) model](https://developer.arm.com/Tools%20and%20Software/Fixed%20Virtual%20Platforms) -for emulating code that uses SME2 instructions. You can either run the prebuilt container image provided in this Learning Path or build it yourself using the Docker file that is included. +for emulating code that uses SME2 instructions. 
You can either run the prebuilt container image provided in this Learning Path or build it yourself using the Docker file that is included. If building manually, follow the instructions in the ``sme2-environment.docker`` file to install the required tools on your machine. @@ -144,7 +144,7 @@ that it is working with the following: docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world -c9c5fd25a1bd: Pull complete +c9c5fd25a1bd: Pull complete Digest: sha256:940c619fbd418f9b2b1b63e25d8861f9cc1b46e3fc8b018ccfe8b78f19b8cc4f Status: Downloaded newer image for hello-world:latest @@ -257,3 +257,12 @@ These Apple devices support SME2 natively. | Mac Mini (2024) | 2024 | M4, M4 Pro, M4 Max | | MacBook Pro (14-inch, 16-inch, 2024)| 2024 | M4 Pro, M4 Max | | MacBook Air (2025) | 2025 | M4 | + + +These Android phones support SME2 natively. + + +| Device | Release Date | Chip Options | +|-------------------------------------|--------------|---------------------------| +| Vivo X300 | 2025 | MediaTek Dimensity 9500 featuring an 8-core Arm C1 CPU cluster and Arm G1-Ultra GPU | +| OPPO Find X9 | 2025 | MediaTek Dimensity 9500 featuring an 8-core Arm C1 CPU cluster and Arm G1-Ultra GPU | diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_index.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_index.md new file mode 100644 index 0000000000..1bc986ec80 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_index.md @@ -0,0 +1,49 @@ +--- +title: Deploy Edge AI models scalably using Edge Impulse and AWS IoT Greengrass + +draft: true +cascade: + draft: true + +minutes_to_complete: 120 + +who_is_this_for: This learning path is for Edge AI and embedded engineers who need to scalably deploy crafted ML for the Edge to thousands of edge devices. 
+ +learning_objectives: + - Basic understanding of Edge Impulses Edge ML Solution + - Basic hardware setup for Edge AI ML development with Edge Impulse + - Install AWS IoT Greengrass onto the edge device + - Configure the edge device with the custom integration between Edge Implulse and AWS IoT Greengrass + +prerequisites: + - An [Edge Impulse Studio](https://studio.edgeimpulse.com/signup) account (workshop will walk through this). + - An AWS Account (if not being hosted by AWS Workshop Studio) + +author: Doug Anson + +### Tags +skilllevels: Introductory +cloud_service_providers: AWS +subjects: ML +armips: + - Cortex-M + +tools_software_languages: + - Edge Impulse + - Edge AI + +operatingsystems: + - Linux + +### FIXED, DO NOT MODIFY +# ================================================================================ +further_reading: + - resource: + title: Edge Impulse for beginners + link: https://docs.edgeimpulse.com/docs/readme/for-beginners + type: doc + +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_next-steps.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. 
+title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. +--- diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/cleanup.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/cleanup.md new file mode 100644 index 0000000000..a06354b69f --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/cleanup.md @@ -0,0 +1,13 @@ +--- +title: 9. AWS Account Cleanup (Optional) +weight: 11 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Cleanup + +**AWS workshop attendees**: The temporary AWS account given to you will automatically be deleted. No other action is necessary at this time. + +**Personal AWS Accounts**: To minimize costs of your AWS resources, you can go to the AWS IoTCore Greengrass deployments page and revise your deployment. In the revision, remove the Edge Impulse custom component from the deployment and redeploy. This will shutdown the "runner" service on your edge device and will no longer send messages into IoTCore when inference results are present. Additionally, if using the EC2 edge device in the workshop, you will want to navigate to the EC2 dashboard, select your EC2 instance you created, and then set the instance state to "terminated" via the "Instance state" button/dropdown. You can also cancel your Greengrass deployments and delete both your Greengrass core device as well as your IoT Thing for your core device (all accomplished via the IoTCore dashboard). 
diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/customcomponentdeployment.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/customcomponentdeployment.md new file mode 100644 index 0000000000..2496172830 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/customcomponentdeployment.md @@ -0,0 +1,73 @@ +--- +title: 6. Custom Component Deployment +weight: 8 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Greengrass Component Deployment + +In this section, we will create an AWS IoT Greengrass deployment that will download, prepare, install, and run our Edge Impulse "Runner" service on our edge device. When the "Runner" service starts, it will connect back to our Edge Impulse environment via the API key we inserted into AWS Secret Manager and will download and start to run our deployed ML model we created in Edge Impulse Studio! Let's get this started! + +### 0. (Non-Camera Edge Devices Only): Additional Custom Component + +If your edge device does not contain a camera (i.e. EC2 edge device), you will need to deploy an additional custom component. Please follow [these steps](./NonCameraCustomComponent.md) to get the additional component created. You will be selecting this component in addition to the custom component we created for the Edge Impulse "Runner" service. + +### 1. Deploy the custom component to a selected Greengrass edge device or group of edge devices. + +Almost done! We can now go back to the AWS Console -> IoT Core -> Greengrass -> Deployments page and select a deployment (or create a new one!) to deploy our component down to as selected edge device or group of gateways as needed: + +Deploy to a group of devices: + +![GGDeploy](./images/GG_Create_Deployment.png) + +Deploy to a specific device (i.e. 
my EC2 Edge Device): + +![GGDeploy](./images/GG_Create_Deployment_2.png) + +In either case above we now press "Next" and select our newly created custom component: + +![GGDeploy](./images/GG_Create_Deployment_3.png) + +>**_NOTE:_** +>If you are using an edge device which does not have a camera, you will also need to select the "EdgeImpulseRunnerRuntimeInstallerComponent" custom component that you created above ("Non-Camera Edge Device Custom Component"): +>![GGDeploy](./images/GG_Create_Deployment_3a.png) + +Press "Next" again, then select our custom component and press "Configure Component" to configure the "Runner" component: + +![GGDeploy](./images/GG_Create_Deployment_4.png) + +>**_NOTE:_** +>If you also have the Non-Camera component, it does NOT need to be configured... only the "EdgeImpulseLinuxRunnerServiceComponent" should be configured + +#### Customizing a specific Deployment + +We now see that our custom component we registered has a default configuration. We can, however, customize it specifically for our specific hardware configuration (i.e. to a specific device or group of similar devices...). + +First lets recall the JSON we saved off when we configured our hardware. Lets customize our Greengrass deployment by clearing, copying, and pasting that JSON into the "Configuration to merge" window... then press "Confirm": + +![GGDeploy](./images/GG_Create_Deployment_5.png) + +You'll then see the previous page and continue pressing "Next" until you get to the "Deploy" page: + +![GGDeploy](./images/GG_Create_Deployment_6.png) + +> **_NOTE:_** +>When performing the deployment, its quite common to, when selecting one of our newly created custom components, to then "Customize" that component by selecting it for "Customization" and entering a new JSON structure (same structure as what's found in the component's associated YAML file for the default configuration) that can be adjusted for a specific deployment (i.e. 
perhaps you want to change the DeviceName
Edge Impulse Custom Component Creation +weight: 7 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Edge Impulse "Runner" Service Custom Component + +We will utilize a Greengrass "Custom Component" to create and deploy the Edge Impulse runner service (the service that will run our Edge Impulse model on the edge device) including the required additional prerequisites (NodeJS install, libvips install). AWS IoT Greengrass' custom component feature is ideal to create custom components that can be specialized to prepare, run, and shutdown a given custom service. + +Let's get started! + +### 1. Clone the repo to acquire the Edge Impulse Component recipes and artifacts + +Please clone this [repo](https://github.com/edgeimpulse/aws-greengrass-components) to retrieve the Edge Impulse component recipes (yaml files) and the associated artifacts. + +### 2. Upload Edge Impulse Greengrass Component artifacts into AWS S3 + +First, you need to go to the S3 console in AWS via AWS Console -> S3. From there, you will create an S3 bucket. For sake of example, we name this bucket "MyS3Bucket123". + + ![CreateS3Bucket](./images/S3_Create_Bucket.png) + +Next, the following directory structure needs to be created your new bucket: + + ./artifacts/EdgeImpulseServiceComponent/1.0.0 + +Next, navigate to the "1.0.0" directory in your S3 bucket and then press "Upload" to upload the artifacts into the bucket. You need to upload the following files (these will be located in the ./artifacts/EdgeImpulseServiceComponent/1.0.0 from your cloned repo). Please upload all of these files into S3 at the above directory location: + + install.sh + run.sh + launch.sh + stop.sh + +Your S3 Bucket contents should look like this: + +![UploadToS3](./images/S3_Upload_Artifacts.png) + +### 3. Customize the component recipe files + +Next we need to customize our Greengrass component recipe YAML file to reflect the actual location of our artifacts stored in S3. 
Please replace ALL occurrences of "YOUR\_S3\_ARTIFACT\_BUCKET" with your S3 bucket name (i.e. "MyS3Bucket123"). Please do this to the "EdgeImpulseLinuxRunnerServiceComponent.yaml" file. Save the file. + +Also FYI, we can customize the defaulted configuration of your custom component by editing, within "EdgeImpulseLinuxRunnerServiceComponent.yaml" file, the default configuration JSON. We won't need to do this for this workshop but its an useful option nonetheless. + +The default configuration in "EdgeImpulseLinuxRunnerServiceComponent.yaml" contains the following JSON configuration settings for the component: + + EdgeImpulseLinuxRunnerServiceComponent.yaml: + { + "node_version": "20.12.1", + "vips_version": "8.12.1", + "device_name": "MyEdgeImpulseDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "__none__", + "eiparams": "--greengrass", + "iotcore_backoff": "5", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_ggc_user_groups": "video audio input users", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "__none__", + "ei_shutdown_behavior": "__none__", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 50.0, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__", + } + +#### Attribute Description + +The attributes in each of the above default configurations is outlined below: + +* **node\_version**: Version of NodeJS to be installed by the component +* **vips\_version**: Version of the libvips library to be compiled/installed by the component +* **device\_name**: Template for the name of the device in EdgeImpulse... 
a unique suffix will be added to the name to prevent collisions when deploying to groups of devices +* **launch**: service launch type (typically just leave this as-is) +* **sleep\_time\_sec**: wait loop sleep time (component lifecycle stuff... leave as-is) +* **lock\_filename**: name of lock file for this component (leave as-is) +* **gst\_args**: optional GStreamer args, spaces replaced with ":", for custom video invocations +* **eiparams**: additional parameters for launching the Edge Impulse service (leave as-is) +* **iotcore\_backoff**: number of inferences to "skip" before publication to AWS IoTCore... this is used to control publication frequency (AWS $$...) +* **iotcore\_qos**: MQTT QoS (typically leave as-is) +* **ei\_bindir**: Typical location of where the Edge Impulse services are installed (leave as-is) +* **ei\_ggc\_user\_groups**: A list of additional groups the Greengrass service user account will need to be a member of to allow the Edge Impulse service to invoke and operate correctly (typically leave as-is). For JetPack v6.x and above, please add "render" as an additional group. 
+ +* **ei\_sm\_secret\_id**: ID of the Edge Impulse API Key within AWS Secret Manager +* **ei\_sm\_secret\_name**: Name of the Edge Impulse API Key within AWS Secret Manager +* **install\_kvssink**: Option (default: "no", on: "yes") to build and make ready the kvssink gstreamer plugin +* **publish\_inference\_base64\_image**: Option (default: "no", on: "yes") to include a base64 encoded image that the inference result was based on +* **enable\_cache\_to\_file**: Option (default: "no", on: "yes") to enable both inference and associated image to get written to a specified local directory as a pair: .img and .json for each inference, identified with a unique identifier +* **cache\_file\_directory**: Option (default: "__none__") to specify the local directory when enable_cache_to_file is set to "yes" +* **ei\_poll\_sleeptime\_ms**: time (in ms) for the long polling message processor (typically leave as-is) +* **ei\_local\_model\_file**: option to utilize a previously installed local model file +* **ei\_shutdown\_behavior**: option to alter the shutdown behavior of the linux runner process. (can be set to "wait\_for\_restart" to cause the runner to pause after running the model and wait for the "restart" command to be issued (see "Commands" below for more details on the "restart" command)) +* **enable\_threshold\_limit**: option to enable/disable the threshold confidence filter (must be "yes" or "no". Default is "no") +* **metrics\_sleeptime\_ms**: option to publish the model metrics statistics (time specified in ms). +* **default\_threshold**: option to specify threshold confidence filter "limit" (a value between 0 < x <= 1.0). Default setting is 0.7 +* **threshold\_criteria**: option to specify the threshold confidence filter criteria (must be one of: "gt", "ge", "eq", "le", or "lt") +* **enable\_cache\_to\_s3**: option to enable caching the inference image/result to an AWS S3 bucket +* **s3\_bucket**: name of the optional S3 bucket to cache results into + +### 4. 
Register the custom component via its recipe file + +From the AWS Console -> IoT Core -> Greengrass -> Components, select "Create component". Then: + + 1. Select the "yaml" option to Enter the recipe + 2. Clear the text box to remove the default "hello world" yaml recipe + 3. Copy/Paste the entire/edited contents of your "EdgeImpulseLinuxRunnerServiceComponent.yaml" file + 4. Press "Create Component" + +![CreateComponent](./images/GG_Create_Component.png) + +If formatting and artifact access checks out OK, you should have a newly created component listed in your Custom Components AWS dashboard! + +Next we will create a Greengrass Deployment to deploy our custom component to our edge devices. \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/edgeimpulseprojectbuild.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/edgeimpulseprojectbuild.md new file mode 100644 index 0000000000..856cea5d4b --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/edgeimpulseprojectbuild.md @@ -0,0 +1,113 @@ +--- +title: 2. Edge Impulse Project Setup +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Creating our Edge Impulse Environment + +The next step is to create our Edge Impulse environment. Edge Impulse provides a simple solution to creating and building a ML model for specific edge devices focused on specific tasks. Lets get started. + +### 1. Create Edge Impulse Account + +Lets create our account in Edge Impulse. Navigate to https://studio.edgeimpulse.com and select "Sign Up" down in the right hand corner: + +![Sign Up](./images/EI_SignUp_1.png) + +Next, fill in the requested information and press "Sign Up": + +![Complete Information](./images/EI_SignUp_2.png) + +If successful, you will be promoted as follows. 
Press "Click here to build your first ML model": + +![Successful Sign Up](./images/EI_SignUp_3.png) + +You will be presented with a wizard to create a new default project: + +![Intro Wizard To create ML Model](./images/EI_SIgnUp_4.png) + +You can dismiss the wizard by pressing the "-" in the upper right hand corner... this will reveal your current new default project: + +![New Project](./images/EI_SignUp_5.png) + +Next, we will clone an existing project that has a model that has already been created for you and which we will use for this workshop. On to the next step! + +### 2. Clone Project Into Your Account + +Next, we are going to clone an existing project into our own space. Navigate to this public project: + +https://studio.edgeimpulse.com/studio/524106 + +![Public Project](./images/EI_Clone_1.png) + +Press the "Clone this project" button in the upper right. You will be presented with a dialog that will initiate the clone: + +![Clone Project](./images/EI_Clone_2.png) + +Leave everything defaulted and press "Clone Project" in the lower right. The cloning process will commence: + +![Start Project Clone](./images/EI_Clone_3.png) + +The cloning process will take about 12 minutes to complete. When it is complete: + +![Completed Clone](./images/EI_Clone_4.png) + +Next, click "Dashboard" to view your project... it should look something like this: + +![My Cloned Project](./images/EI_Clone_5.png) + +OK! We now have the project we will use for the workshop... lets continue by exploring the project a bit and creating a deployment for our own edge device. Onward! + +### 3. Build your project's deployment + +Let's have a look at some of the features in Edge Impulse studio. From a high level, Edge Impulse studio provides a solution to build, train, optimize, and deploy ML models for any edge device: + +![Edge Impulse](./images/EI_Project_1.png) + +Key in this is the "Impulse". 
On the left side of the dashboard, our "Impulse" has been created for us and is called "Cat and Dog Detector". Click on "Create Impulse". You will see that there are 3 main parts of an "Impulse": The pre-processing block, the model block, and the post-processing block: + +![Edge Impulse](./images/EI_Project_2.png) + +Clicking on "Object Detection" on the left, you will see some detail on the model that has been utilized in our Impulse: + +![Edge Impulse](./images/EI_Project_3.png) + +In our project, the "Impulse" is fully created, trained, and optimized so we won't have to walk through those steps. Edge Impulse has a ton of examples and docs available [here](https://docs.edgeimpulse.com) to walk you through your first "Impulse" creation: + +![Edge Impulse](./images/EI_Project_4.png) + +What we want to do now is to deploy our model to a specific edge device type. Depending on the specific hardware you are using in this workshop, you can choose from the following deployment edge device choices: + +![Edge Impulse](./images/EI_Project_5.png) + +Please select the appropriate choice and press "Build" (Example, for Raspberry Pi, choose "Linux(AARCH64)" to run the model on the CPU of the RPi): + +![Edge Impulse](./images/EI_Project_6.png) + + NOTE: For these edge device choices, please select the "int8" option + prior to pressing "Build". + + NOTE: The "Linux(AARCH64)" is suitable for many Linux-class ARM-based + 64bit devices where only the CPU will be used to run the model. + +Now that we have built our deployment, we are ready to move on to the next step - creating an API Key. Let's do this! + +### 4. Create your project API key + +Lastly, let's create our API key for our project. We'll use this key to connect our Greengrass component's environment to our Edge Impulse project. 
Click on the "Dashboard" on the left hand side of our project: + +![Edge Impulse Dashboard](./images/EI_Key_1.png) + +Press "Keys": + +![Edge Impulse Dashboard](./images/EI_Key_2.png) + +Press "Add new API key" on the upper right side. Provide a name for the key. The role should be "admin" and "Set as development key" should be selected. Press "Create API key": + +![Edge Impulse Dashboard](./images/EI_Key_3.png) + +You will then be presented with the API key. Make a copy of this key as this will be the only time you will be able to see the full key for copying. We will place this key into AWS Secret Manager shortly so be sure to save it now!! + +OK! We are making good progress! Next up, we are going to install AWS IoT Greengrass into our edge device. Lets go! \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/greengrassinstallation.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/greengrassinstallation.md new file mode 100644 index 0000000000..87e42301c7 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/greengrassinstallation.md @@ -0,0 +1,97 @@ +--- +title: 3. AWS IoT Greengrass Installation +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## AWS IoT Greengrass Installation + +The following sections outline how one installs AWS IoT Greengrass onto our edge device. AWS IoT Greengrass is ideal to use to create deployments of software and settings down to edge devices in a very scalable fashion. + +Log into your edge device via SSH and we'll start the process of installing/configuring Greengrass. + +### 1. Create AWS Administrator Credentials + +Prior to installing AWS IoT Greengrass, we need to create a set of AWS credentials that will be used as part of the installation process. 
+ +>**_NOTE:_** +>These credentials may automatically be provided to you when you initiate the workshop as hosted by AWS Workshop Studio. If so, please copy the credentials as we'll need them in the next step. The credentials should look like this: +> +> export AWS_ACCESS_KEY_ID= +> export AWS_SECRET_ACCESS_KEY= + +If you are using your personal AWS account and do not have the credentials created, you will need to create them. If you already have them, please skip the next step and proceed to step 3) below. + +#### 1a. Creating Access Credentials (personal AWS Accounts) + +Please navigate to your AWS Dashboard and search for IAM: + +![IAM](./images/GG_Install_iam.png) + +Launch the IAM Dashboard: + +![IAM](./images/GG_Install_iam_dashboard.png) + +Select "Users" from the left hand side of the dashboard: + +![IAM](./images/GG_Install_iam_2.png) + +Select your user, then select the "Security Credentials" tab: + +![IAM](./images/GG_Install_iam_3.png) + +Press "Create access key": + +![IAM](./images/GG_Install_iam_4.png) + +Choose "Other" and then press "Next": + +![IAM](./images/GG_Install_iam_5.png) + +Set a description for the access key and then press "Create access key" + +![IAM](./images/GG_Install_iam_6.png) + +You will now have the (only...) opportunity to copy and save off your credentials. It's best if you save them to a temp file that you'll read later in this format: + + export AWS_ACCESS_KEY_ID= + export AWS_SECRET_ACCESS_KEY= + +### 2. Install AWS IoT Greengrass + +Greengrass is typically installed from within the AWS Console -> AWS IoT Core -> Greengrass -> Core Devices menu... select/press "Set up one core device". There are multiple ways to install Greengrass - "Nucleus Classic" is the version of Greengrass that is based on Java. "Nucleus Lite" is a native version of Greengrass that is typically part of a Yocto-image based implementation. 
+ +In this example, we choose the "Linux" device type and we are going to download the installer for Greengrass and invoke it as part of the installation of a "Nucleus Classic" instance: + +![CreateDevice](./images/GG_Install_Device.png) + +Lower down in the menu, you will see the specific instructions that are custom-crafted for you to download and invoke the "Nucleus Classic" installer. The basic sequence of instructions are: + + 1) Start with an SSH shell session into your edge device + 2) copy and paste your two AWS credentials into the shell environment + 3) copy and paste/run the installer download curl command into your shell + 4) copy and paste/run the installer invocation command + 5) wait for the installer to complete + + ![CreateDevice](./images/GG_Install_Device2.png) + +### 3. Modify the Greengrass TokenExchange Role with additional permissions + +When you run a Greengrass component within Greengrass, a service user (typically a linux user called "ggc_user" for "Nucleus Classic" installations) invokes the component, as specified in the lifecycle section of your recipe. Credentials are passed to the invoked process via its environment (NOT by the login environment of the "ggc_user"...) during the invocation spawning process. These credentials are used by the spawned process (typically via the AWS SDK which is part of the spawned process...) to connect back to AWS and "do stuff". These permissions are controlled by an AWS IAM Role called "GreengrassV2TokenExchangeRole". We need to modify that role and add "Full AWS IoT Core Permission" as well as "AWS Secrets Manager Read/Write" permission. + +To modify the role, from the AWS Console -> IAM -> Roles search for "GreengrassV2TokenExchangeRole", Then: + + 1. Select "GreengrassV2TokenExchangeRole" in the search results list + 2. Select "Add Permissions" -> "Attach Policies" + 3. Search for "AWSIoTFullAccess", select it, then press "Add Permission" down at the bottom + 4. 
Repeat the search for "S3FullAccess" and "SecretsManagerReadWrite" + +![TERUpdate](./images/IAM_TER_Update.png) + +When done, your GreengrassV2TokenExchangeRole should now show that it has "AWSIoTFullAccess", "S3FullAccess" and "SecretsManagerReadWrite" permissions added to it. + +Next, we will clone and configure the EdgeImpulse "Runner" custom component used to deploy the Edge Impulse "Runner" model execution runtime. + +Onward! \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupec2.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupec2.md new file mode 100644 index 0000000000..7dece6c720 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupec2.md @@ -0,0 +1,110 @@ +## Setup and Configuration for Ubuntu-based EC2 instance + +### Create Ubuntu EC2 Instance + +AWS EC2 instances can be used to simulate edge devices when edge device hardware isn't available. + +We'll start by opening our AWS Console and search for EC2: + +![AWS Console](../images/EC2_Setup_1.png) + +We'll now open the EC2 console page: + +![AWS EC2 Console](../images/EC2_Setup_2.png) + +Select "Launch instance". Provide a Name for the EC2 instance and select the "Ubuntu" Quick Start option. Additionally, select "64-bit(Arm)" as the architecture type and select "t4g.large" as the Instance type: + +![Create EC2 Instance](../images/EC2_Setup_3.png) + +Additionally, please click on "Create new Key Pair" and provide a name for a new SSH key pair that will be used to SSH into our EC2 instance. Press "Create key pair": + +![Create EC2 Keypair](../images/EC2_Setup_4.png) + +>**_NOTE:_** +>You will notice that a download will occur with your browser. Save off this key (a .pem file) as we'll use it shortly. + +Next, we need to edit our "Network Settings" for our EC2 instance... 
scroll down to "Network Settings" and press "Edit": + +![Security Group](../images/EC2_Setup_4_ns.png) + +Press "Add security group rule" and lets allow port tcp/4912: + +![Security Group](../images/EC2_Setup_4_4912.png) + +Lets also give the EC2 instance a bit more disk space. Please change the "8" to "28" here: + +![Increase Diskspace](../images/EC2_Setup_5.png) + +Finally, press "Launch instance". You should see your EC2 instance getting created: + +![Launch Instance](../images/EC2_Setup_6.png) + +Now, press "View all instances" and press the refresh button... you should see your new EC2 instance in the "Running" state: + +![Running Instance](../images/EC2_Setup_7.png) + +You can scroll over and save off your Public IPv4 IP Address. You'll need this to SSH into your EC2 instance. + +Lets now confirm that we can SSH into our EC2 instance. With the saved off pem file and our EC2 Public IPv4 IP address, lets ssh into our EC2 instance + +>**_NOTE:_** +>In this example, my pem file is named DougsEC2SimulatedEdgeDeviceKeyPair.pem and my EC2 instances' public IP address is 1.2.3.4 + + chmod 600 DougsEC2SimulatedEdgeDeviceKeyPair.pem + ssh -i ./DougsEC2SimulatedEdgeDeviceKeyPair.pem ubuntu@1.2.3.4 + +You should see a login shell now for your EC2 instance! + +![Login Shell](../images/EC2_Setup_8.png) + +Excellent! You can keep that shell open as we'll make use of it when we start installing Greengrass a bit later. + +Lastly, lets install the prerequisites that we need. 
Please run these commands to add some required dependencies: + + sudo apt update + sudo apt install -y curl unzip + sudo apt install -y gcc g++ make build-essential nodejs sox gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-base gstreamer1.0-plugins-base-apps + +Additionally, we need to install the prerequisites for AWS IoT Greengrass "classic": + + sudo apt install -y default-jdk + +Before we go to the next section, lets also save off this JSON - it will be used to configure our AWS Greengrass custom component a bit later: + +#### Non-Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyEC2EdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "filesrc:location=/home/ggc_user/data/testSample.mp4:!:decodebin:!:videoconvert:!:videorate:!:video/x-raw,framerate=2200/1:!:jpegenc", + "eiparams": "--greengrass", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "/home/ggc_user/data/currentModel.eim", + "ei_shutdown_behavior": "wait_on_restart", + "ei_ggc_user_groups": "video audio input users system", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 50, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +OK, Lets proceed to the next step and get our Edge Impulse environment setup! 
+ +[Next](../../edgeimpulseprojectbuild/) diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupnvidiajetson.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupnvidiajetson.md new file mode 100644 index 0000000000..436328a2a1 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupnvidiajetson.md @@ -0,0 +1,92 @@ +## Install/Configure Nvidia Jetpack (Jetson devices) + +The workshop will assume that the Nvidia Jetson edge device has been loaded with Jetpack 5.x and/or Jetpack 6.0 per flashing instructions located at this [Nvidia website](https://docs.nvidia.com/jetson/archives/r34.1/DeveloperGuide/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/flashing.html). + +### Additional Setup + +Once you have your Jetson platform installed and running, please run these commands to add some required dependencies: + + sudo apt update + sudo apt install -y curl unzip + sudo apt install -y gcc g++ make build-essential nodejs sox gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-base gstreamer1.0-plugins-base-apps + +Additionally, we need to install the prerequisites for AWS IoT Greengrass "classic": + + sudo apt install -y default-jdk + +Lastly, its recommended to update your linux device with the latest security patches and updates if available. + +We are now setup! Before we continue, please save off the following JSONs. These JSONs will be used to configure our AWS Greengrass deployment. 
+ +#### Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyNvidiaJetsonEdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "v4l2src:device=/dev/video0:!:video/x-raw,width=640,height=480:!:videoconvert:!:jpegenc", + "eiparams": "--greengrass --force-variant float32 --silent", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "__none__", + "ei_shutdown_behavior": "__none__", + "ei_ggc_user_groups": "video audio input users system render", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 65.0, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + + +#### Non-Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyNvidiaJetsonEdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "filesrc:location=/home/ggc_user/data/testSample.mp4:!:decodebin:!:videoconvert:!:videorate:!:video/x-raw,framerate=2200/1:!:jpegenc", + "eiparams": "--greengrass", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "/home/ggc_user/data/currentModel.eim", + "ei_shutdown_behavior": "wait_on_restart", + "ei_ggc_user_groups": "video audio input users system render", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": 
"__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 50, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +OK! Lets continue by getting our Edge Impulse project setup! Let's go! + +[Next](../../edgeimpulseprojectbuild/) \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupqc6490ubuntu.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupqc6490ubuntu.md new file mode 100644 index 0000000000..8be5496230 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetupqc6490ubuntu.md @@ -0,0 +1,125 @@ +## Ubuntu-based QC6490 platforms + +First, please flash your QC6490 device per your manufacturers instructions to load up Ubuntu onto the device. + +### Additional Setup + +Once you have your Ubuntu platform installed and running, please run these commands to add some required dependencies: + + sudo apt update + sudo apt install -y curl unzip + sudo apt install -y gcc g++ make build-essential nodejs sox gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-base gstreamer1.0-plugins-base-apps + +Additionally, we need to install the prerequisites for AWS IoT Greengrass "classic": + + sudo apt install -y default-jdk + +Lastly, its recommended to update your linux device with the latest security patches and updates if available. + +We are now setup! Before we continue, please save off the following JSONs. These JSONs will be used to configure our AWS Greengrass deployment. 
+ +#### QC Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyQC6490UbuntuEdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "qtiqmmfsrc:name=camsrc:camera=0:!:video/x-raw,width=1280,height=720:!:videoconvert:!:jpegenc", + "eiparams": "--greengrass --force-variant float32 --silent", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "__none__", + "ei_shutdown_behavior": "__none__", + "ei_ggc_user_groups": "video audio input users", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 65.0, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +#### USB-attached Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyQC6490UbuntuEdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "v4l2src:device=/dev/video0:!:video/x-raw,width=640,height=480:!:videoconvert:!:jpegenc", + "eiparams": "--greengrass --force-variant float32 --silent", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "__none__", + "ei_shutdown_behavior": "__none__", + "ei_ggc_user_groups": "video audio input users", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + 
"metrics_sleeptime_ms": 30000, + "default_threshold": 65.0, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +#### Non-Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyQC6490UbuntuEdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "filesrc:location=/home/ggc_user/data/testSample.mp4:!:decodebin:!:videoconvert:!:videorate:!:video/x-raw,framerate=2200/1:!:jpegenc", + "eiparams": "--greengrass", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "/home/ggc_user/data/currentModel.eim", + "ei_shutdown_behavior": "wait_on_restart", + "ei_ggc_user_groups": "video audio input users system", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 50, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +OK! Lets continue by getting our Edge Impulse project setup! Let's go! 
+ +[Next](../../edgeimpulseprojectbuild/) \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetuprpi5.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetuprpi5.md new file mode 100644 index 0000000000..af9477a537 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/hardwaresetuprpi5.md @@ -0,0 +1,106 @@ +## Setup and configuration of Raspberry Pi 5 with Raspberry Pi OS + +### Install RaspberryPi OS + +The Raspberry Pi 5 is a super simple device that is fully supported by Edge Impulse and AWS as an edge device. + +First step in this exercise is to install the latest version of the Raspberry Pi OS onto your RPi. A SD card will be required and typically should be at least 16GB in size. + +The easiest way to setup Raspberry Pi OS is to follow the instructions here after downloading and installing the Raspberry Pi Imager application: + +![Raspberry Pi Imager](../images/RPi_Imager.png) + +Instructions: [Install Raspberry Pi Imager](https://www.raspberrypi.com/software/) + +Please save off the IP address of your edge device along with login credentials to remote SSH into the edge device. You'll need these in the next steps. + +#### Additional Prerequisites + +First, lets open a shell into your RPi (using the Raspberry Pi OS default username of "pi" with password "raspberrypi" and having an IP address of 1.2.3.4): + + ssh pi@1.2.3.4 + +Once logged in via ssh, lets install the prerequisites that we need. 
Please run these commands to add some required dependencies: + + sudo apt update + sudo apt install -y curl unzip + sudo apt install -y gcc g++ make build-essential nodejs sox gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-base gstreamer1.0-plugins-base-apps + +Additionally, we need to install the prerequisites for AWS IoT Greengrass "classic": + + sudo apt install -y default-jdk + +Lastly, please safe off these JSONs. These will be used to customize our AWS Greengrass custom component based upon using an RPi5 device with or without a camera: + +#### Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyRPi5EdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": "v4l2src:device=/dev/video0:!:video/x-raw,width=640,height=480:!:videoconvert:!:jpegenc", + "eiparams": "--greengrass --force-variant float32 --silent", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "__none__", + "ei_shutdown_behavior": "__none__", + "ei_ggc_user_groups": "video audio input users", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 65.0, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + + +#### Non-Camera configuration + + { + "Parameters": { + "node_version": "20.18.2", + "vips_version": "8.12.1", + "device_name": "MyRPi5EdgeDevice", + "launch": "runner", + "sleep_time_sec": 10, + "lock_filename": "/tmp/ei_lockfile_runner", + "gst_args": 
"filesrc:location=/home/ggc_user/data/testSample.mp4:!:decodebin:!:videoconvert:!:videorate:!:video/x-raw,framerate=2200/1:!:jpegenc", + "eiparams": "--greengrass", + "iotcore_backoff": "-1", + "iotcore_qos": "1", + "ei_bindir": "/usr/local/bin", + "ei_sm_secret_id": "EI_API_KEY", + "ei_sm_secret_name": "ei_api_key", + "ei_poll_sleeptime_ms": 2500, + "ei_local_model_file": "/home/ggc_user/data/currentModel.eim", + "ei_shutdown_behavior": "wait_on_restart", + "ei_ggc_user_groups": "video audio input users system", + "install_kvssink": "no", + "publish_inference_base64_image": "no", + "enable_cache_to_file": "no", + "cache_file_directory": "__none__", + "enable_threshold_limit": "no", + "metrics_sleeptime_ms": 30000, + "default_threshold": 50, + "threshold_criteria": "ge", + "enable_cache_to_s3": "no", + "s3_bucket": "__none__" + } + } + +Alright! Lets continue by getting our Edge Impulse project setup! Let's go! + +[Next](../../edgeimpulseprojectbuild/) \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/noncameracustomcomponent.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/noncameracustomcomponent.md new file mode 100644 index 0000000000..6be44f2a4a --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/noncameracustomcomponent.md @@ -0,0 +1,46 @@ +## Non-Camera Custom Component + +For those edge devices that do not contain a camera, the following component will prepare the edge device with some sample images that can be referenced by the Edge Impulse "Runner" component's JSON configuration (via "gst\_args" settings) to direct the running model to pull its image data from the file (vs. camera). + +### 1. Clone the component repo + +Please clone this [repo](https://github.com/edgeimpulse/aws-greengrass-workshop-supplemental). 
You will find the following files: + + EdgeImpulseRunnerRuntimeInstallerComponent.yaml + artifacts/EdgeImpulseRunnerRuntime/1.0.0/install.sh + artifacts/EdgeImpulseRunnerRuntime/1.0.0/models.tar.gz + artifacts/EdgeImpulseRunnerRuntime/1.0.0/samples.tar.gz + +### 2. Copy the artifacts files to AWS S3 + +From the AWS dashboard, select the S3 dashboard and navigate to the same bucket you created for the "Runner" custom component. + +In that bucket, please create the following directory structure: + + artifacts/EdgeImpulseRunnerRuntime/1.0.0 + +Within the 1.0.0 directory in S3, upload these files from your cloned repo (located in ./artifacts/EdgeImpulseRunnerRuntime/1.0.0 within your cloned repo): + + install.sh + models.tar.gz + samples.tar.gz + +Next, we need to edit the EdgeImpulseRunnerRuntimeInstallerComponent.yaml and change the artifact location from "YOUR\_S3\_ARTIFACT\_BUCKET" to the actual name of your S3 bucket name (you'll see "YOUR\_S3\_ARTIFACT\_BUCKET" near the bottom of the yaml file). Save the file. + +### 3. Create the custom component + +Within the AWS dashboard, go to the IoTCore dashboard, then navigate to "Components" under the "Greengrass devices" drop-down on the left hand side. + +![CreateComponent](GG_Create_NC_Component_1.png) + +Press "Create Component" and select "YAML" as the recipe format type. Copy and paste the contents of your updated/modified file EdgeImpulseRunnerRuntimeInstallerComponent.yaml into the text window after clearing the initial contents: + +![CreateComponent](GG_Create_NC_Component_2.png) + +Finally, press "Create Component" and you should now have 2 custom components registered: + +![CreateComponent](GG_Create_NC_Component_3.png) + +Awesome! Now that the non-camera support component is created, we can go back and continue with the deployment of these components to your edge device via the AWS IoT Greengrass deployment mechanism. Press "Return to Deployment Steps" below and continue! 
+ +[Return to Deployment Steps](../6_CustomComponentDeployment/CustomComponentDeployment.md) \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardwaresetup.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardwaresetup.md new file mode 100644 index 0000000000..dafaace9c6 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardwaresetup.md @@ -0,0 +1,24 @@ +--- +title: 1. Hardware Setup +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Edge Device Hardware Setup + +First, an edge device must be setup. In the following sections, Linux-compatible edge devices are detailed to enable them to receive and run as a AWS IoT Greengrass edge device. The list of supported devices will grow over time. + +Please select one of the following and follow the "Setup" link... + +### Option 1: Ubuntu EC2 Instance [Setup](/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/HardwareSetupEC2/) + +### Option 2: Qualcomm QC6490 Platforms with Ubuntu [Setup](/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/HardwareSetupQC6490Ubuntu/) + +### Option 3: Nvidia Jetson Platforms with Jetpack 5.x/6.0 [Setup](/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/HardwareSetupNvidiaJetson/) + +### Option 4: Raspberry Pi 5 with RaspberryPi OS [Setup](/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/hardware/HardwareSetupRPi5/) + + +#### (More exciting device options will be added soon. Stay tuned!) 
\ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/architecture.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/architecture.png new file mode 100644 index 0000000000..fc61b45180 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/architecture.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/cats_expected.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/cats_expected.png new file mode 100644 index 0000000000..10a73f9f78 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/cats_expected.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats.png new file mode 100644 index 0000000000..535157b709 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats_expected.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats_expected.png new file mode 100644 index 0000000000..5af0fb09ed Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/dogsandcats_expected.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_1.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_1.png new file mode 100644 index 0000000000..93947e01d0 Binary files /dev/null and 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_2.png new file mode 100644 index 0000000000..23bec80960 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_3.png new file mode 100644 index 0000000000..6b02c54b36 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4.png new file mode 100644 index 0000000000..b198644b8c Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_4912.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_4912.png new file mode 100644 index 0000000000..4b3aa4eedf Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_4912.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_ns.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_ns.png new file mode 100644 index 0000000000..cb7c257906 
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_4_ns.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_5.png new file mode 100644 index 0000000000..8264272108 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_6.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_6.png new file mode 100644 index 0000000000..5aec525fdf Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_6.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_7.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_7.png new file mode 100644 index 0000000000..e14a2a8464 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_7.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_8.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_8.png new file mode 100644 index 0000000000..39332b1cc1 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ec2_setup_8.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_1.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_1.png new file mode 100644 index 
0000000000..b8fb81cf93 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_2.png new file mode 100644 index 0000000000..229b2a4424 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_3.png new file mode 100644 index 0000000000..49a2c9324c Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_4.png new file mode 100644 index 0000000000..88194a9eff Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_4.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_5.png new file mode 100644 index 0000000000..f58708de55 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_clone_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_inference_output.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_inference_output.png new file mode 
100644 index 0000000000..fe58d84951 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_inference_output.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_1.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_1.png new file mode 100644 index 0000000000..17321622af Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_2.png new file mode 100644 index 0000000000..6c55ff1858 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_3.png new file mode 100644 index 0000000000..b4a6929c7f Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_key_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_model_metrics.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_model_metrics.png new file mode 100644 index 0000000000..675d148e6e Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_model_metrics.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_1.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_1.png new file 
mode 100644 index 0000000000..e4966864c3 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_2.png new file mode 100644 index 0000000000..2fd938a1fc Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_3.png new file mode 100644 index 0000000000..79149e96fa Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_4.png new file mode 100644 index 0000000000..b172b542cf Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_4.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_5.png new file mode 100644 index 0000000000..fbb1ae4345 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_6.png 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_6.png new file mode 100644 index 0000000000..12cf5812e7 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_project_6.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_1.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_1.png new file mode 100644 index 0000000000..4b65ce56b3 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_2.png new file mode 100644 index 0000000000..566af7a5d1 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_3.png new file mode 100644 index 0000000000..5497e6af24 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_4.png new file mode 100644 index 0000000000..1d69a337cd Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_4.png differ diff --git 
a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_5.png new file mode 100644 index 0000000000..e214f6da78 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/ei_signup_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_component.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_component.png new file mode 100644 index 0000000000..53bd031b46 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_component.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment.png new file mode 100644 index 0000000000..0ce872961f Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_2.png new file mode 100644 index 0000000000..b2462ac486 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3.png new file mode 100644 index 0000000000..4b396a2540 Binary files /dev/null and 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3a.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3a.png new file mode 100644 index 0000000000..bb276948c0 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_3a.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_4.png new file mode 100644 index 0000000000..d5ee8434c7 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_4.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_5.png new file mode 100644 index 0000000000..2af77afc0b Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_6.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_6.png new file mode 100644 index 0000000000..711727ad33 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_deployment_6.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_1.png 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_1.png new file mode 100644 index 0000000000..6da33e049e Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_1.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_2.png new file mode 100644 index 0000000000..b56d4e8455 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_3.png new file mode 100644 index 0000000000..dc1f32b1cb Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_create_nc_component_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device.png new file mode 100644 index 0000000000..18ddde044d Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device2.png new file mode 100644 index 0000000000..89b75736e4 Binary files /dev/null and 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_device2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam.png new file mode 100644 index 0000000000..827ce1bab9 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_2.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_2.png new file mode 100644 index 0000000000..1f048c4dfd Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_2.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_3.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_3.png new file mode 100644 index 0000000000..5bf5fdf8de Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_3.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_4.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_4.png new file mode 100644 index 0000000000..63480c4be1 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_4.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_5.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_5.png 
new file mode 100644 index 0000000000..a79e9149e9 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_5.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_6.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_6.png new file mode 100644 index 0000000000..2542fafc53 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_6.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_dashboard.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_dashboard.png new file mode 100644 index 0000000000..47c8821e12 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/gg_install_iam_dashboard.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/iam_ter_update.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/iam_ter_update.png new file mode 100644 index 0000000000..c8e0f39d60 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/iam_ter_update.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/rpi_imager.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/rpi_imager.png new file mode 100644 index 0000000000..65905cf344 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/rpi_imager.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_create_bucket.png 
b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_create_bucket.png new file mode 100644 index 0000000000..c98b753337 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_create_bucket.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_upload_artifacts.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_upload_artifacts.png new file mode 100644 index 0000000000..aac56840b9 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/s3_upload_artifacts.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/sm_create_secret.png b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/sm_create_secret.png new file mode 100644 index 0000000000..fd8317c701 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/images/sm_create_secret.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/overview.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/overview.md new file mode 100644 index 0000000000..a196732bfb --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/overview.md @@ -0,0 +1,52 @@ +--- +title: 0. Overview +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +# Edge Impulse with AWS IoT Greengrass + +AWS IoT Greengrass is an AWS IoT service that enables edge devices with customizable/downloadable/installable "components" that can be run to augment what's running on the edge device itself. 
AWS IoT Greengrass permits the creation and publication of a "Greengrass Component" that is effectively a set of instructions and artifacts that, when installed and run, create and initiate a custom specified service. + +For more information about AWS IoT Core and AWS Greengrass please review: [AWS IoT Greengrass](https://docs.aws.amazon.com/greengrass/v2/developerguide/what-is-iot-greengrass.html) + +## Overview + +The Edge Impulse integration with AWS IoT Core and AWS IoT Greengrass is structured as follows: + +![Architecture](images/Architecture.png) + +* The Edge Impulse "Runner" service now has a "--greengrass" option that enables the integration. +* AWS Secrets Manager is used to protect the Edge Impulse API Key by removing it from view via command line arguments. +* The Edge Impulse "Runner" service can relay inference results into IoT Core for further processing in the cloud +* The Edge Impulse "Runner" service relays model performance metrics, at configurable intervals, into IoTCore for further processing. +* The Edge Impulse "Runner" service has accessible commands that can be used to configure the service real-time as well as retrieve information about the model/service/configuration. +* More information regarding the Edge Impulse "Runner" service itself can be found [here](https://docs.edgeimpulse.com/docs/tools/edge-impulse-for-linux/linux-node-js-sdk). + +Edge Impulse has several custom Greengrass components that can be deployed and run on the Greengrass-enabled edge device to enable this integration. The component recipes and artifacts can be found [here](https://github.com/edgeimpulse/aws-greengrass-components). Lets examine one of those components that we'll used for this workshop! 
+ +### The "EdgeImpulseLinuxRunnerServiceComponent" Greengrass Component + +The Edge Impulse "Runner" service downloads, configures, installs, and executes an Edge Impulse model, developed for the specific edge device, and provides the ability to retrieve model inference results. In this case, our component for this service will relay the inference results into AWS IoT Core under the following topic: + + /edgeimpulse/device//inference/output + +Additionally, model performance metrics will be published, at defined intervals, here: + + /edgeimpulse/device//model/metrics + +Lastly, the Edge Impulse "Runner" service has been upgrade to support a set of bi-directional commands that can be accessed via publication of specific JSON structures to the following topic: + + /edgeimpulse/device//command/input + +Command results are published to the following topic: + + /edgeimpulse/device//command/output + +The command reference, including JSON structure details, can be found [here](https://docs.edgeimpulse.com/docs/integrations/aws-greengrass#commands-january-2025-integration-enhancements). + +Lets dive deeper into this integration starting with setting up our own edge device! + +Lets go! diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/running.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/running.md new file mode 100644 index 0000000000..9fb6ad386b --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/running.md @@ -0,0 +1,117 @@ +--- +title: 7. Running +weight: 9 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Running + +Now that we have our Edge Impulse component(s) deployed to our edge device, how do we confirm things are working? + +Simple! 
+ +On your browser, open the following url: + + http://:4912 + +So, for example, if my public ip address of my edge device is "1.1.1.1", my url would be: + + http://1.1.1.1:4912 + +You should now see both the input (video either from file or from your edge device's attached camera) as well as inference results and inference times. There are two output scenarios depending on whether your edge device has a camera or does not have a camera... read below! + +### Option 1: Edge devices with cameras + +You should be able to see live video of your camera via the url above. Now, point your camera at this picture: + +![CatsNDogs](./images/DogsAndCats.png) + +You should see that your model, running on the edge, is identifying both the dog and the cat! It should look something like this: + +![CatsNDogs](./images/DogsAndCats_expected.png) + +### Option 2: Edge devices without cameras + +In this case, you don't have a camera to use but your component is actually configured to pull its image data from local files installed by the optional non-camera component. + +In this instance, a video of a cat will be shown. The url above should be displaying something similar to this: + +![CatsNDogs](./images/Cats_expected.png) + +Now, if yours looks to be frozen... don't worry! It simply means that the "Runner" has completed playing the 90 second cat video. The "Runner" service is now waiting for you to issue a "restart" command to replay the same video... please continue reading below... we'll outline how to dispatch the "restart" command via AWS IoTCore! + +### AWS IoTCore Integration + +With our installed components, we can also examine the ML inference output in AWS IoTCore. + +From the AWS Dashboard, bring up the IoTCore dashboard.
Select the "MQTT Test Client" from the left hand side: + +In the "Subscribe to a topic" section, enter this and press "subscribe": + + /edgeimpulse/device/# + +For those edge devices WITH cameras, you should see output on the left whenever your model identifies a cat and/or dog. The output format should look something like this: + +![Inference Output](./images/EI_Inference_output.png) + +Additionally, you will see model metrics being published periodically: + +![Model Metrics](./images/EI_Model_Metrics.png) + +#### Issuing a command and examining the command result + +The integration provides a set of commands (see the [Summary](8_Summary.md) for details on the commands). One command, in particular, restarts the Edge Impulse "Runner" service. + +In order to use commands we have to know what our device is "named" in IoTCore. You can easily find this by looking at the inference output in the "MQTT Test Client": the publication "topic" is shown for each inference result you see. The topic structure is as follows: + + /edgeimpulse/devices//inference/output + /edgeimpulse/devices//model/metrics + /edgeimpulse/devices//command/output + /edgeimpulse/devices//command/input + +You will want to copy and save off the "my_device_name" portion of the topics that YOU see in your "MQTT Test Client" dashboard's inference results. + +Once you have the device name, back on the "MQTT Test Client" dashboard, select the "Publish to a topic" tab and enter this topic (but with YOUR device name filled in): + + /edgeimpulse/devices//command/input + +Clear out the message content window and add the following JSON: + + { + "cmd": "restart" + } + +Click on the "Additional configuration" button and enable the "Retain message on this topic" checkbox. + +Press the "Publish" button. + +What you should now see, on the topic + + /edgeimpulse/devices//command/output + +is a notification that the runner service has been restarted.
On your browser, navigate back to: + + http://:4912 + +and you should see your inferencing resuming. You should also see more inference output in IoTCore on this topic: + + /edgeimpulse/devices//inference/output + +>**_NOTE:_** +>For those who have edge devices WITHOUT cameras, your runner will read its input image video and report inferences until the video ends. Once ended, the "Runner" will simply wait for you to issue the above "restart" command to replay the video file. The restart command will cause the Runner to restart and it will once again play the video file. + +Cool! Congratulations! You have completed this workshop!! + +#### Supplemental notes +Below are a few additional notes regarding the component deployment, log files, launch times for some devices: + +>**_NOTE:_** +>After the deployment is initiated, on the FIRST invocation of a given deployment, expect to wait several moments (upwards of 5-10 min in fact) while the component installs all of the necessary pre-requisites that the component requires... this can take some time so be patient. You can also log into the edge gateway, receiving the component, and examine log files found in /greengrass/v2/logs. There you will see each components' current log file (same name as the component itself... ie. EdgeImpulseLinuxServiceComponent.log...) where you can watch the installation and invocation as it happens... any errors you might suspect will be shown in those log files. + +>**_NOTE:_** +>While the components are running, in addition to the /greengrass/v2/logs directory, each component has a runtime log in /tmp. The format of the log file is: "ei\_lockfile\_[linux | runner | serial]\_\.log. Users can "tail" that log file to watch the component while it is running. + +>**_NOTE:_** +>Additionally, for Jetson-based devices where the model has been compiled specifically for that platform, one can expect to have a 2-3 minute delay in the model being loaded into the GPU memory for the first time.
Subsequent invocations will be very short. diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/secretmanagersetup.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/secretmanagersetup.md new file mode 100644 index 0000000000..78cdaf0462 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/secretmanagersetup.md @@ -0,0 +1,26 @@ +--- +title: 4. Secrets Manager Configuration +weight: 6 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Gather and install an EdgeImpulse API Key into AWS Secrets Manager + +First we have to create an API Key in Edge Impulse via the Studio. + +Next, we will go into AWS Console -> Secrets Manager and press "Store a new secret". From there we will specify: + + 1. Select "Other type of secret" + 2. Enter "ei_api_key" as the key NAME for the secret (goes in the "Key" section) + 3. Enter our actual API Key (goes in the "Value" section) + 4. Press "Next" + 5. Enter "EI_API_KEY" for the "Secret Name" (actually, this is its Secret ID...) + 6. Press "Next" + 7. Press "Next" + 8. Press "Store" + +![CreateSecret](./images/SM_Create_Secret.png) + +Next we will install the EdgeImpulse Custom Greengrass Component we'll be using. diff --git a/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/summary.md b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/summary.md new file mode 100644 index 0000000000..171be46fdc --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/edge_impulse_greengrass/summary.md @@ -0,0 +1,384 @@ +--- +title: 8. Summary/Conclusions +weight: 10 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Summary + +Congratulations! You have completed this workshop!
Please select "Next" below to read a bit about cleaning up your AWS environment in order to minimize costs/etc (AWS workshop attendees: this will happen automatically for you) + +### For More Information + +Below is some detailed reference information regarding the Edge Impulse AWS IoT Integration + +## Model Metrics + +Basic model metrics are now accumulated and published in the integration into IoT Core. The metrics will be published at specified intervals (per the "metrics\_sleeptime\_ms" component configuration parameter) to the following IoT Core topic: + + /edgeimpulse/device//model/metrics + +The metrics published are: + +* **accumulated mean**: running accumulation of the average confidences from the linux runner while running the current model +* **accumulated standard deviation**: running accumulation of the standard deviation from the linux runner while running the current model + +The format of the model metrics output is as follows: + + { + "mean_confidence": 0.696142, + "standard_deviation": 0.095282, + "confidence_trend": "decr", + "details": { + "n": 5, + "sum_confidences": 3.480711, + "sum_confidences_squared": 2.468464 + }, + "ts": 1736016142920, + "id": "e4faa78b-2a09-40d1-adfd-8e5fc32feb11" + } + +## Command Reference + +In the 2025 January integration update, the following commands are now available with the Edge Impulse Greengrass Linux Runner Greengrass integration. 
The following commands are dispatched via the integration's IoT Core Topic as a JSON: + + /edgeimpulse/device//command/input + +Results from the command can be found using the following topic: + + /edgeimpulse/device//command/output + +Command JSON structure is defined as follows: + + { + "cmd": , + "value": + } + +The currently supported commands are described below: + +### Initial Invocation + +When the runner process is started/restarted, the following JSON will be published to the command output topic above: + + { + "result": { + "status": "started", + "ts": 1736026956853, + "id": "5c4e627e-6e9d-4382-bba7-00c0129705c4" + } + } + +This JSON can be used to flag a new invocation of the runner service (or a restart of the runner service). If there are any previous runtime changes made (i.e. confidence filter settings for example... see below), those changes can be resent to the newly invoked runtime. + +### Restart Command + +##### Command JSON: + + { + "cmd": "restart" + } + +##### Command Description: + +This command directs the integration to "restart" the Edge Impulse linux runner process. In conjunction with the "ei\_shutdown\_behavior" option being set to "wait\_for\_restart", the linux runner process will continue operating after the model has completed its operation. The linux runner process will continue to process input commands and will restart the linux runner via dispatching this command. + +### Enable Threshold Filter Command + +##### Command JSON: + + { + "cmd": "enable_threshold_filter" + } + +##### Command Description: + +This command directs the integration to enable the threshold filter. The filter will control which inferences will get published into IoT Core. By default the filter is disabled so that all inferences reported are sent into IoT Core. 
+ +##### Command Result: + +The command output will be published as follows and will include the filter config: + + { + "result": { + "threshold_filter_config": { + "enabled": "yes", + "confidence_threshold": 0.7, + "threshold_criteria": "ge" + } + } + } + +### Disable Threshold Filter Command + +##### Command JSON: + + { + "cmd": "disable_threshold_filter" + } + +##### Command Description: + +This command directs the integration to disable the threshold filter. + +##### Command Result: + +The command output will be published as follows and will include the filter config: + + { + "result": { + "threshold_filter_config": { + "enabled": "no", + "confidence_threshold": 0.7, + "threshold_criteria": "ge" + } + } + } + +### Set Threshold Filter Criteria Command + +##### Command JSON: + + { + "cmd": "set_threshold_filter_criteria", + "value": "ge" + } + +##### Command Description: + +This command directs the integration to set the threshold filter criteria. The available options for the criteria are: + +* **"gt"**: publish if inference confidence is "greater than"... +* **"ge"**: publish if inference confidence is "greater than or equal to"... +* **"eq"**: publish if inference confidence is "equal to"... +* **"le"**: publish if inference confidence is "less than or equal to"... +* **"lt"**: publish if inference confidence is "less than"... + +##### Command Result: + +The command output will be published as follows: + + { + "result": { + "criteria": "gt" + } + } + +### Get Threshold Filter Criteria Command + +##### Command JSON: + + { + "cmd": "get_threshold_filter_criteria" + } + +##### Command Description: + +This command directs the integration to get the threshold filter criteria. The currently set threshold criteria is published to the command output topic above.
+ +##### Command Result: + +The command output will be published as follows with the configured criteria: + + { + "result": { + "criteria": "gt" + } + } + + +### Set Threshold Filter Confidence Command + +##### Command JSON: + + { + "cmd": "set_threshold_filter_confidence", + "value": 0.756 + } + +##### Command Description: + +This command directs the integration to set the threshold filter confidence bar. The value set must be a value 0 < x <= 1.0 + +##### Command Result: + +The command output will be published as follows with the specified confidence bar: + + { + "result": { + "confidence_threshold": "0.756" + } + } + + +### Get Threshold Filter Confidence Command + +##### Command JSON: + + { + "cmd": "get_threshold_filter_confidence" + } + +##### Command Description: + +This command directs the integration to get the threshold filter confidence bar. The currently set threshold confidence value is published to the command output topic above. + +##### Command Result: + +The command output will be published as follows with the currently configured confidence bar: + + { + "result": { + "confidence_threshold": "0.756" + } + } + +### Get Threshold Filter Config Command + +##### Command JSON: + + { + "cmd": "get_threshold_filter_config" + } + +##### Command Description: + +This command directs the integration to retrieve the current threshold filter config. The currently set threshold filter config is published to the command output topic above. + +##### Command Result: + +The command output will be published as follows with the currently configured filter config: + + { + "result": { + "threshold_filter_config": { + "enabled": "no", + "confidence_threshold": "0.756", + "threshold_criteria": "gt" + } + } + } + +### Get Model Info Command + +##### Command JSON: + + { + "cmd": "get_model_info" + } + +##### Command Description: + +This command directs the integration to retrieve the currently running model information. 
The model information is published to the command output topic above. + +##### Command Result: + +The command output will be published as follows with the current model information: + + { + "result": { + "model_info": { + "model_name": "occupant_counter", + "model_version": "v25", + "model_params": { + "axis_count": 1, + "frequency": 0, + "has_anomaly": 0, + "image_channel_count": 3, + "image_input_frames": 1, + "image_input_height": 640, + "image_input_width": 640, + "image_resize_mode": "fit-longest", + "inferencing_engine": 6, + "input_features_count": 409600, + "interval_ms": 1, + "label_count": 1, + "labels": [ + "person" + ], + "model_type": "object_detection", + "sensor": 3, + "slice_size": 102400, + "threshold": 0.5, + "use_continuous_mode": false, + "sensorType": "camera" + } + } + } + } + +### Reset Model Metrics Command + +##### Command JSON: + + { + "cmd": "reset_metrics" + } + +##### Command Description: + +This command directs the integration to reset the model metrics counters. + +##### Command Result: + +The command output will be published as follows to indicate the metrics counters are reset: + + { + "result": { + "metrics_reset": "OK" + } + } + +### Clear Cache Command + +##### Command JSON: + + { + "cmd": "clear_cache" + } + +##### Command Description: + +This command directs the integration to clear the currently configured inference image cache. The entire cache will be cleared. This command is sensitive to the Greengrass component configuration (i.e. which inference caches are enabled/disabled). This command will clear ALL caches that are currently enabled in the component configuration. 
+ +##### Command Result: + +The command output will be published as follows with the clear cache results: + + { + "result": { + "clear_cache": { + "local": "OK", + "s3": "OK" + } + } + } + +### Clear Specified File From Cache Command + +##### Command JSON: + + { + "cmd": "clear_cache_file", + "value": + } + +##### Command Description: + +This command directs the integration to clear the specified file (by its uuid) from within the inference cache. This command is sensitive to the Greengrass component configuration (i.e. which inference caches are enabled/disabled). This command will clear the specified file from ALL enabled caches per the component configuration. + +##### Command Result: + +The command output will be published as follows with the clear cache results for the specified UUID: + + { + "result": { + "clear_cache_file": { + "local": "OK", + "s3": "OK", + "uuid": "e4faa78b-2a09-40d1-adfd-8e5fc32feb11" + } + } + } \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md b/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md index b3fb6106d3..382bcb535f 100644 --- a/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md @@ -58,6 +58,5 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content.
-layout: learningpathall --- diff --git a/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md b/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md index 83505cecbd..86f20d13fd 100644 --- a/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md @@ -53,6 +53,5 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- \ No newline at end of file diff --git a/content/learning-paths/laptops-and-desktops/win-resource-ps1/_index.md b/content/learning-paths/laptops-and-desktops/win-resource-ps1/_index.md index 93ea7d9143..77e4f28328 100644 --- a/content/learning-paths/laptops-and-desktops/win-resource-ps1/_index.md +++ b/content/learning-paths/laptops-and-desktops/win-resource-ps1/_index.md @@ -1,18 +1,14 @@ --- -title: Track resource usage of applications on Windows on Arm - -draft: true -cascade: - draft: true +title: Measure application resource and power usage on Windows on Arm with FFmpeg and PowerShell minutes_to_complete: 60 -who_is_this_for: This is an introductory topic for developers who want to measure resource usage of applications on Windows on Arm devices. +who_is_this_for: This is an introductory topic for developers who want to measure resource usage of applications on Windows on Arm devices using FFmpeg. 
learning_objectives: - - Run video encode and decode tasks by using FFmpeg - - Benchmark the video encode task - - Sample CPU, memory, and power usage for the video decode task + - Measure application resource usage using FFmpeg and PowerShell + - Benchmark a video encoding task + - Monitor CPU, memory, and power consumption during a video decode task prerequisites: - A Windows on Arm computer such as the Lenovo Thinkpad X13s running Windows 11 @@ -35,7 +31,7 @@ operatingsystems: further_reading: - resource: - title: Recording for Resource-based Analysis + title: Recording for resource-based analysis link: https://learn.microsoft.com/en-us/previous-versions/windows/it-pro/windows-8.1-and-8/hh448202(v=win.10) type: documentation - resource: diff --git a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-1.md b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-1.md index 0b0e8b6856..7ef5e4fc77 100644 --- a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-1.md @@ -7,58 +7,52 @@ layout: learningpathall --- ## Overview -System resource usage provides an approach to understand the performance of an application as a black box. This Learning Path demonstrates how to sample system resource usage using a script. +To understand how well your application performs, you can measure its system resource usage (such as CPU time and memory) without needing to look inside the code. In this Learning Path, you'll learn how to collect resource usage data using a simple script. This helps you compare performance between different builds and see how efficiently your system runs each version. The example application you will use is FFmpeg, a tool set that performs video encode and decode tasks. You will run the same tests with both the x86_64 binary (using Windows instruction emulation) and the Arm64 native binary on a Windows on Arm computer. 
-## Application -Binary builds of FFmpeg are available, so you don't need to build them from source. +## Download the packages +You don't need to compile FFmpeg from source. You can download the pre-built binaries to begin your testing: -To get started: +- First, download the [FFmpeg x86_64 package](https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2025-07-31-14-15/ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1.zip). +- Next, download the [FFmpeg Arm64 native package](https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2025-07-31-14-15/ffmpeg-n7.1.1-56-gc2184b65d2-winarm64-gpl-7.1.zip). -1. Download the [FFmpeg x86_64 package](https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2025-07-31-14-15/ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1.zip). +## Unzip the downloaded packages -2. Download the [FFmpeg Arm64 native package](https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2025-07-31-14-15/ffmpeg-n7.1.1-56-gc2184b65d2-winarm64-gpl-7.1.zip). - -3. Unzip the downloaded packages. - -You can find the binaries in the `bin` folder. +Once you've downloaded both packages, unzip them. You'll find the binaries in the `bin` folder inside each package. The x86_64 version is for emulation, while the Arm64 version runs natively on your Windows on Arm device. Double-check the folder names so you don't mix them up. {{% notice Note %}} -Make note of the paths to both versions of `ffmpeg.exe` and `ffplay.exe`, so you can run each one and compare the results. +It's a good idea to create a separate folder for each version. Make a note of where you put both `ffmpeg.exe` and `ffplay.exe` for each version as you'll need these paths soon to run your tests and compare results. {{% /notice %}} -## Video source -Download the test video [RaceNight](https://ultravideo.fi/video/RaceNight_3840x2160_50fps_420_8bit_YUV_RAW.7z) from a public dataset. +Now you're set up with both versions of FFmpeg. 
Next, you'll use these binaries to encode a video and see how each one performs. + +## Download the video source -Unzip the package and note the path to the uncompressed `.yuv` file. +For this test, you'll use a sample video called RaceNight. Download it from this [RaceNight public dataset](https://ultravideo.fi/video/RaceNight_3840x2160_50fps_420_8bit_YUV_RAW.7z). -## Video encoding -The downloaded video file is in YUV raw format, which means playback of the video file involves no decoding effort. You need to encode the raw video with compression algorithms to add computation pressure during playback. +Unzip the package and make a note of the path to the `.yuv` file inside. -Use `ffmpeg.exe` to compress the YUV raw video with the x265 encoder and convert the file format to `.mp4`. +## Encode the video +The video you downloaded is in `.yuv` raw format. This means it's uncompressed and ready for processing, so no decoding step is needed. To test your system's performance, you'll use FFmpeg to compress the video with the x265 encoder and convert it to an `.mp4` file. This workflow lets you measure how efficiently each FFmpeg binary handles video encoding on your Windows on Arm device. 
-Assuming you downloaded the files and extracted them in the current directory, open a terminal and run the following command: +Assuming everything is in your current directory, open a terminal and run this command: ```console -ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1\ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1\bin\ffmpeg.exe -f rawvideo -pix_fmt yuv420p -s 3840x2160 -r 50 -i RaceNight_3840x2160_50fps_420_8bit_YUV_RAW\RaceNight_3840x2160_50fps_8bit.yuv -vf scale=1920:1080 -c:v libx265 -preset medium -crf 20 RaceNight_1080p.mp4 -benchmark -stats -report +ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1\ffmpeg-n7.1.1-56-gc2184b65d2-win64-gpl-7.1\bin\ffmpeg.exe -f rawvideo -pix_fmt yuv420p -s 3840x2160 -r 50 -i RaceNight_3840x2160_50fps_420_8bit_YUV_RAW\RaceNight_3840x2160_50fps_8bit.yuv -vf scale=1920:1080 -c:v libx265 -preset medium -crf 20 RaceNight_1080p.mp4 -benchmark -stats -report ``` {{% notice Note %}} -Modify the paths to `ffmpeg.exe` and the YUV raw video file to match your locations. +Make sure to update the paths to `ffmpeg.exe` and the `.yuv` video file to match where you saved them. {{% /notice %}} -The command transforms the video size and compresses the video into an MP4 file using H.265 encoding (via the x265 encoder). +This command resizes the video and compresses it into an MP4 file using H.265 encoding (using the x265 encoder). The `-benchmark` option shows performance stats while the encoding runs. When it's done, you'll have a new file called `RaceNight_1080p.mp4`. -The `benchmark` option is turned on to show performance data at the same time. +Try running the command with both the x86_64 and Arm64 versions of FFmpeg. Then, compare the results to see which one is faster. -The generated file will be at RaceNight_1080p.mp4. +## View the results -Run the command with both the x86_64 and the Arm64 versions of FFmpeg and compare the output.
- -### View results - -The output below is from the x86_64 version of `ffmpeg.exe`: +Here's what the output looks like for the x86_64 version of `ffmpeg.exe`: ```output x265 [info]: tools: rd=3 psy-rd=2.00 early-skip rskip mode=1 signhide tmvp @@ -83,7 +77,7 @@ x265 [info]: Weighted P-Frames: Y:0.0% UV:0.0% encoded 600 frames in 71.51s (8.39 fps), 9075.96 kb/s, Avg QP:27.27 ``` -The output below is from the Arm64 native compiled `ffmpeg.exe`: +And here's the output from the Arm64 native version: ```output x265 [info]: tools: rd=3 psy-rd=2.00 early-skip rskip mode=1 signhide tmvp @@ -108,6 +102,8 @@ x265 [info]: Weighted P-Frames: Y:0.0% UV:0.0% encoded 600 frames in 26.20s (22.90 fps), 9110.78 kb/s, Avg QP:27.23 ``` -The last line of each output shows the run time and the frames per second for each build of FFmpeg. +Check out the last line in each output. The run time and frames per second show how each build performed. The Arm64 version is much faster, thanks to running natively on your hardware. + +## Review your progress and compare performance -Continue to learn how to track resource usage and compare each version. \ No newline at end of file +You've successfully set up both x86_64 and Arm64 versions of FFmpeg, downloaded a sample video, and encoded it using each binary on your Windows on Arm device. By comparing the output, you've seen firsthand how native Arm64 performance outpaces emulated x86_64. This gives you a solid foundation for deeper resource usage analysis in the next section. 
diff --git a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-2.md b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-2.md index 0d6f7edfd7..bd467f46e8 100644 --- a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-2.md @@ -1,16 +1,17 @@ --- -title: Track system resources +title: Track system resource usage on Windows on Arm with PowerShell weight: 3 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Sample video decoding resource usage +## Analyze resource usage during sample video decoding -To monitor resource usage during video decoding, use the following PowerShell script. This script starts the decoding process, periodically records CPU and memory statistics, and saves the results to a CSV file for analysis. -Open your code editor, copy the content below, and save it as `sample_decoding.ps1`. +To monitor resource usage during video decoding, use the following PowerShell script. This script starts the decoding process, periodically records CPU and memory statistics, and then saves the results to a CSV file for analysis. + +Open your code editor, copy the content below, and save it as `sample_decoding.ps1`: ```PowerShell { line_numbers = true } param ( @@ -114,24 +115,23 @@ Run the script: Set-ExecutionPolicy -Scope Process RemoteSigned .\sample_decoding.ps1 ``` - -A video starts playing and completes in 3 minutes. When finished, you can find the results file `usage_log.csv` in the current directory. +When you run the script, the video plays for about three minutes. After playback finishes, you'll find the results file named `usage_log.csv` in your current directory. Open this file with a spreadsheet application to review and analyze the recorded resource usage data. {{% notice Note %}} -Script execution may be blocked due to security policy configuration. 
The `Set-ExecutionPolicy` command allows local scripts to run during this session. +Script execution might be blocked due to security policy configuration. The `Set-ExecutionPolicy` command allows local scripts to run during this session. {{% /notice %}} -### Script explanation +## Understand what the script does The `param` section defines variables including the binary path, video playback arguments, sampling interval, and result file path. You can modify these values as needed. -Lines 15-26 check and modify the binary file attributes. The binaries in use are downloaded from the web and may be blocked from running due to lack of digital signature. These lines unlock the binaries. +Lines 15–26 check whether the binary file is blocked by Windows security settings. When you download executables from the web, Windows may prevent them from running if they lack a digital signature. This section attempts to unlock the binary using the `Unblock-File` command, allowing the script to run the application without security restrictions. Line 41 retrieves all child processes of the main process. The statistical data includes resources used by all processes spawned by the main process. The `while` section collects CPU and memory usage periodically until the application exits. The CPU usage represents accumulated time that the process runs on the CPU. The memory usage shows the size of memory occupation with or without shared spaces accounted for. -### View results +### View the results The output below shows the results from running the x86_64 version of `ffplay.exe`: @@ -150,5 +150,4 @@ Timestamp,CPU Sum (s),Memory Sum (MB),Memory Private Sum (MB),CPU0 (s),Memory0 ( ...... 2025-08-18T10:39:01.7856168+08:00,329.109375,352.53,339.96,329.09375,340.23046875,338.20703125,0.015625,12.30078125,1.75390625 ``` - -The sample result file uses CSV (comma-separated values) format. 
You can open it with spreadsheet applications like Microsoft Excel for better visualization and create charts for data analysis. +The sample result file is in CSV (comma-separated values) format. Open it with a spreadsheet application such as Microsoft Excel to view the data in a table. You can use built-in chart tools to visualize CPU and memory usage over time, making it easier to spot trends and compare performance between Arm64 and x86_64 versions. diff --git a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-3.md b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-3.md index d4494f3247..da20f48e35 100644 --- a/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-3.md +++ b/content/learning-paths/laptops-and-desktops/win-resource-ps1/how-to-3.md @@ -1,12 +1,12 @@ --- -title: Measure power usage +title: Measure power usage on Windows on Arm with PowerShell weight: 4 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Sampling battery status +## Sample the battery status Querying battery status provides a way to measure power usage without an external power meter. Battery monitoring is also convenient because data collection and logging can be automated. @@ -77,7 +77,7 @@ A video starts playing and completes in 30 minutes. When finished, you can find The script collects remaining battery capacity and discharge rate periodically. You can track the battery remaining capacity to understand the power consumption patterns. -### View results +### View the results The output below shows the results from running the x86_64 version of `ffplay.exe`: @@ -100,3 +100,7 @@ Timestamp,RemainingCapacity(mWh),DischargeRate(mW) The sample results file is in CSV format. You can open it with spreadsheet applications like Microsoft Excel for better visualization and to plot data analysis charts. Battery monitoring provides an effective way to measure power consumption differences between x86_64 and native Arm64 applications. 
By comparing discharge rates, you can quantify the power efficiency advantages that Arm processors typically demonstrate for video decoding workloads. + +## Wrapping up and next steps + +You’ve measured power usage on your Windows on Arm device and can now compare results across builds to see how native Arm64 performance affects battery life. Sharing your findings with the Arm developer community helps others optimize applications for Windows on Arm. diff --git a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/1-install-plugin.md b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/1-install-plugin.md index e2b10102fb..d4ce0ad4ca 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/1-install-plugin.md +++ b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/1-install-plugin.md @@ -35,7 +35,8 @@ Before you begin, download the required plugins and dependencies. These two repo [**Neural Super Sampling Unreal Engine Plugin** → GitHub Repository](https://github.com/arm/neural-graphics-for-unreal) -Download the latest release package and extract it on your Windows machine. +Download the latest release package and extract it on your Windows machine. Use the folder corresponding to your Unreal version. + ### 2. Download the runtime for ML Extensions for Vulkan [**Unreal NNE Runtime RDG for ML Extensions for Vulkan** → GitHub Repository](https://github.com/arm/ml-extensions-for-vulkan-unreal-plugin). diff --git a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/2-emulation-layer.md b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/2-emulation-layer.md index 95d00da6cc..3ea024ac64 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/2-emulation-layer.md +++ b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/2-emulation-layer.md @@ -65,17 +65,30 @@ By default, Unreal uses DirectX. 
Instead, you need to choose Vulkan as the defau ![Project Settings with Vulkan selected as Default RHI under Targeted RHIs#center](./images/targeted_rhis.png "Figure 4: Set Vulkan as the default RHI.") +## Create the Plugins directory -## Add and enable the plugins +Open your project directory in Windows explorer, and create a new folder called `Plugins`. -1. Open your project directory in Windows explorer, and create a new folder called `Plugins`. -2. Copy the downloaded and extracted `.zips` into the new directory: +Enabling the plugin will look slightly different depending on what Unreal version you are using. Follow the steps corresponding to your setup. + +## For Unreal 5.5 + +1. Copy the downloaded and extracted `.zip` into the new `Plugins` directory: + - `UE5.5` - `NNERuntimeRDGMLExtensionsForVulkan` - - `NSS` -3. Re-open Unreal Engine. When prompted, confirm plugin integration. -4. Rebuild your project in Visual Studio from source. -5. Verify the installation by opening the Plugins view in Unreal Engine, and making sure the checkbox is selected for both `NSS` and `NNERuntimeRDGMLExtensionsForVulkan` as shown. Restart Unreal Engine if prompted. +2. Re-open Unreal Engine. When prompted, confirm plugin integration. +3. Rebuild your project in Visual Studio from source. +4. Verify the installation by opening the Plugins view in Unreal Engine, and making sure the checkbox is selected for both `NSS` and `NNERuntimeRDGMLExtensionsForVulkan` as shown. Restart Unreal Engine if prompted. ![Unreal Engine plugins window showing NSS and NNERuntimeRDGMLExtensionsForVulkan enabled#center](./images/verify_plugin_enabled.png "Figure 5: Verify plugin installation in Unreal Engine.") -With the emulation layers and plugins configured, you're ready to run Neural Super Sampling in Unreal Engine. Continue to the next section to test the integration. \ No newline at end of file + +## For Unreal 5.4 + +1. 
Copy the downloaded and extracted `.zip` into the new `Plugins` directory: + - `UE5.4` +2. Re-open Unreal Engine. When prompted, confirm plugin integration. +3. Rebuild your project in Visual Studio from source. +4. Verify the installation by opening the Plugins view in Unreal Engine, and making sure the checkbox is selected for `NSS`. Restart Unreal Engine if prompted. + +With the emulation layers and plugins configured, you're ready to run Neural Super Sampling in Unreal Engine. Continue to the next section to test the integration. diff --git a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/_index.md b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/_index.md index 26edbc8611..02b82c26fb 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/_index.md +++ b/content/learning-paths/mobile-graphics-and-gaming/nss-unreal/_index.md @@ -15,7 +15,7 @@ learning_objectives: prerequisites: - Windows 11 - - Unreal Engine 5.5 (Templates and Feature Pack enabled) + - Unreal Engine 5.4 or 5.5 (Templates and Feature Pack enabled) - Visual Studio 2022 (with Desktop Development with C++ and .NET desktop build tools) diff --git a/content/learning-paths/servers-and-cloud-computing/bitmap_scan_sve2/_index.md b/content/learning-paths/servers-and-cloud-computing/bitmap_scan_sve2/_index.md index 9da8cbe6b4..09b634a31d 100644 --- a/content/learning-paths/servers-and-cloud-computing/bitmap_scan_sve2/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/bitmap_scan_sve2/_index.md @@ -46,5 +46,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
-layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_index.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_index.md new file mode 100644 index 0000000000..dcc30f94be --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_index.md @@ -0,0 +1,64 @@ +--- +title: Deploy Cassandra on a Google Axion C4A virtual machine + +draft: true +cascade: + draft: true + +minutes_to_complete: 30 + +who_is_this_for: This is an introductory topic for software developers migrating Cassandra workloads from x86_64 to Arm-based servers, specifically on Google Cloud C4A virtual machines built on Axion processors. + + +learning_objectives: + - Provision an Arm-based SUSE SLES virtual machine on Google Cloud (C4A with Axion processors) + - Install and configure Apache Cassandra on a SUSE Arm64 (C4A) instance + - Validate Cassandra functionality using CQLSH and baseline keyspace/table operations + - Benchmark Cassandra performance using cassandra-stress for read and write workloads on Arm64 (Aarch64) architecture + +prerequisites: + - A [Google Cloud Platform (GCP)](https://cloud.google.com/free) account with billing enabled + - Familiarity with Cassandra architecture, replication, and [Cassandra partitioning & event-driven I/O](https://cassandra.apache.org/doc/stable/cassandra/architecture/) + +author: Pareena Verma + +##### Tags +skilllevels: Introductory +subjects: Databases +cloud_service_providers: Google Cloud + +armips: + - Neoverse + +tools_software_languages: + - Apache Cassandra + - Java + - cqlsh + - cassandra-stress + +operatingsystems: + - Linux + +# ================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +further_reading: + - resource: + title: Google Cloud documentation + link: https://cloud.google.com/docs + type: documentation + 
+ - resource: + title: Apache Cassandra documentation + link: https://cassandra.apache.org/doc/latest/ + type: documentation + + - resource: + title: Cassandra-stress documentation + link: https://cassandra.apache.org/doc/4.0/cassandra/tools/cassandra_stress.html + type: documentation + +weight: 1 +layout: "learningpathall" +learning_path_main_page: "yes" +--- diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/backgraound.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/backgraound.md new file mode 100644 index 0000000000..ba3c04462c --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/backgraound.md @@ -0,0 +1,23 @@ +--- +title: Getting started with Cassandra on Google Axion C4A (Arm Neoverse-V2) + +weight: 2 + +layout: "learningpathall" +--- + +## Google Axion C4A Arm instances in Google Cloud + +Google Axion C4A is a family of Arm-based virtual machines built on Google’s custom Axion CPU, which is based on Arm Neoverse-V2 cores. 
Designed for high-performance and energy-efficient computing, these virtual machines offer strong performance for modern cloud workloads such as CI/CD pipelines, microservices, media processing, and general-purpose applications. + +The C4A series provides a cost-effective alternative to x86 virtual machines while leveraging the scalability and performance benefits of the Arm architecture in Google Cloud. + +To learn more about Google Axion, refer to the [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu) blog. + +## Cassandra + +Cassandra is a highly scalable, distributed NoSQL database designed to handle large amounts of data across many commodity servers without a single point of failure. + +It provides high availability, fault tolerance, and linear scalability, making it ideal for real-time big data applications and high-throughput workloads. + +Cassandra is widely used for time-series data, IoT applications, recommendation engines, and large-scale cloud services. Learn more from the [Cassandra official website](https://cassandra.apache.org/) and its [documentation](https://cassandra.apache.org/doc/latest/). diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/baseline.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/baseline.md new file mode 100644 index 0000000000..dfdf7a687a --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/baseline.md @@ -0,0 +1,119 @@ +--- +title: Apache Cassandra baseline testing on Google Axion C4A Arm Virtual machine +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + + +Since Cassandra has been successfully installed on your GCP C4A Arm virtual machine, follow these steps to verify that it is running and functioning properly. 
+ +## Baseline Testing for Apache Cassandra + +This guide helps verify the installation and perform baseline testing of **Apache Cassandra**. + +## Start Cassandra + +Run Cassandra in the background: + +```console +cassandra -R +``` + +The `-R` flag allows Cassandra to run in the background as a daemon, so you can continue using the terminal. The first startup may take **30–60 seconds** as it initializes the necessary files and processes. + +Check logs to ensure Cassandra started successfully: + +```console +tail -f ~/cassandra/logs/system.log +``` +Look for the message **"Startup complete"**, which indicates Cassandra is fully initialized. + +### Check Cassandra Status +```console +nodetool status +``` +You should see an output similar to: + +```output +Datacenter: datacenter1 +======================= +Status=Up/Down +|/ State=Normal/Leaving/Joining/Moving +-- Address Load Tokens Owns (effective) Host ID Rack +UN 127.0.0.1 162.51 KiB 16 100.0% 78774686-39f3-47e7-87c3-3abc4f02a835 rack1 +``` +The `nodetool status` command displays the health and status of your Cassandra node(s). For a single-node setup, the output should indicate that the node is **Up (U)** and **Normal (N)**. This confirms that your Cassandra instance is running and ready to accept queries. + +### Connect with CQLSH (Cassandra Query Shell) +**cqlsh** is the interactive command-line shell for Cassandra. It allows you to run Cassandra Query Language (CQL) commands to interact with your database, create keyspaces and tables, insert data, and perform queries. + +```console +cqlsh +``` +You’ll enter the CQL (Cassandra Query Language) shell. + +### Create a Keyspace (like a database) +A **keyspace** in Cassandra is similar to a database in SQL systems. Here, we create a simple keyspace `testks` with a **replication factor of 1**, meaning data will only be stored on one node (suitable for a single-node setup). 
+ +```sql +CREATE KEYSPACE testks WITH replication = {'class':'SimpleStrategy','replication_factor' : 1}; +``` +Check if created: + +```sql +DESCRIBE KEYSPACES; +``` + +You should see an output similar to: + +```output +cqlsh> DESCRIBE KEYSPACES; + +system system_distributed system_traces system_virtual_schema +system_auth system_schema system_views testks +``` + +### Create a Table +Tables in Cassandra are used to store structured data. This step creates a `users` table with three columns: `id` (unique identifier), `name` (text), and `age` (integer). The `id` column is the primary key. + +```sql +USE testks; + +CREATE TABLE users ( + id UUID PRIMARY KEY, + name text, + age int +); +``` + +### Insert Data +We insert two sample rows into the `users` table. The `uuid()` function generates a unique identifier for each row, which ensures that every user entry has a unique primary key. + +```sql +INSERT INTO users (id, name, age) VALUES (uuid(), 'Alice', 30); +INSERT INTO users (id, name, age) VALUES (uuid(), 'Bob', 25); +``` + +### Query Data +This command retrieves all rows from the `users` table. Successful retrieval confirms that data insertion works correctly and that queries return expected results. + +```sql +SELECT * FROM users; +``` + +You should see an output similar to: + +```output + id | age | name +--------------------------------------+-----+------- + c08dafde-17f0-4a4a-82b8-54455bb07836 | 25 | Bob + d47eb93c-3988-4aa1-bc85-9561500a6893 | 30 | Alice + +(2 rows) +``` + +This baseline test verifies that Cassandra 5.0.5 is installed and running correctly on the VM. It confirms the node status, allows connection via `cqlsh`, and ensures basic operations like creating a keyspace, table, inserting, and querying data work as expected. + +Please now press "Ctrl-D" to exit the Cassandra Query Shell. 
diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/benchmnarking.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/benchmnarking.md new file mode 100644 index 0000000000..b86e744d7e --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/benchmnarking.md @@ -0,0 +1,367 @@ +--- +title: Cassandra Benchmarking +weight: 6 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Cassandra Benchmarking by Cassandra-Stress +Cassandra benchmarking can be performed using the built-in `cassandra-stress` tool, which helps measure database performance under different workloads such as write, read, and mixed operations. + +### Steps for Cassandra Benchmarking with Cassandra-Stress +**Verify cassandra-stress Installation:** + +Cassandra comes with a built-in tool called **cassandra-stress** that is used for testing performance. It is usually located in the `tools/bin/` folder of your Cassandra installation. + +```console +ls ~/cassandra/tools/bin | grep cassandra-stress +``` +If you see cassandra-stress in the list, it means the tool is installed and ready to use. + +**Run the version check:** + +To make sure the tool works correctly, check its help options. + +```console +~/cassandra/tools/bin/cassandra-stress help +``` +You should see output similar to the following: + +```output +Usage: cassandra-stress [options] +Help usage: cassandra-stress help + +---Commands--- +read : Multiple concurrent reads - the cluster must first be populated by a write test +write : Multiple concurrent writes against the cluster +mixed : Interleaving of any basic commands, with configurable ratio and distribution - the cluster must first be populated by a write test +counter_write : Multiple concurrent updates of counters. +counter_read : Multiple concurrent reads of counters. The cluster must first be populated by a counterwrite test. 
+user : Interleaving of user provided queries, with configurable ratio and distribution +help : Print help for a command or option +print : Inspect the output of a distribution definition +version : Print the version of cassandra stress +``` +If the tool is working, you will see a list of commands and options that you can use to run benchmarks. +This confirms that your setup is correct and you’re ready to start testing Cassandra’s performance. + +### Basic Write Test +Insert 10,000 rows with 50 concurrent threads using `cassandra-stress`: + +```console +~/cassandra/tools/bin/cassandra-stress write n=10000 -rate threads=50 +``` +- **write** → Performs only write operations on the Cassandra cluster. +- **n=10000** → Specifies the number of rows to insert during the benchmark test. +- **-rate threads=50** → Sets the number of concurrent worker threads simulating multiple clients writing to the cluster. + +You should see output similar to the following: + +```output +******************** Stress Settings ******************** +Command: + Type: write + Count: 10,000 + No Warmup: false + Consistency Level: LOCAL_ONE + Target Uncertainty: not applicable + Key Size (bytes): 10 + Counter Increment Distibution: add=fixed(1) +Rate: + Auto: false + Thread Count: 50 + OpsPer Sec: 0 +Population: + Sequence: 1..10000 + Order: ARBITRARY + Wrap: true +Insert: + Revisits: Uniform: min=1,max=1000000 + Visits: Fixed: key=1 + Row Population Ratio: Ratio: divisor=1.000000;delegate=Fixed: key=1 + Batch Type: not batching +Columns: + Max Columns Per Key: 5 + Column Names: [C0, C1, C2, C3, C4] + Comparator: AsciiType + Timestamp: null + Variable Column Count: false + Slice: false + Size Distribution: Fixed: key=34 + Count Distribution: Fixed: key=5 +Errors: + Ignore: false + Tries: 10 +Log: + No Summary: false + No Settings: false + File: null + Interval Millis: 1000 + Level: NORMAL +Mode: + API: JAVA_DRIVER_NATIVE + Connection Style: CQL_PREPARED + Protocol Version: V5 + Username: null + 
Password: null + Auth Provide Class: null + Max Pending Per Connection: 128 + Connections Per Host: 8 + Compression: NONE +Node: + Nodes: [localhost] + Is White List: false + Datacenter: null +Schema: + Keyspace: keyspace1 + Replication Strategy: org.apache.cassandra.locator.SimpleStrategy + Replication Strategy Options: {replication_factor=1} + Table Compression: null + Table Compaction Strategy: null + Table Compaction Strategy Options: {} +Transport: + Truststore: null + Truststore Password: null + Keystore: null + Keystore Password: null + SSL Protocol: TLS + SSL Algorithm: null + SSL Ciphers: TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA +Port: + Native Port: 9042 + JMX Port: 7199 +JMX: + Username: null + Password: *not set* +Graph: + File: null + Revision: unknown + Title: null + Operation: WRITE +TokenRange: + Wrap: false + Split Factor: 1 +Credentials file: + File: *not set* + CQL username: *not set* + CQL password: *not set* + JMX username: *not set* + JMX password: *not set* + Transport truststore password: *not set* + Transport keystore password: *not set* +Reporting: + Output frequency: 1s + Header frequency: *not set* + +Connected to cluster: Test Cluster, max pending requests per connection 128, max connections per host 8 +Datacenter: datacenter1; Host: localhost/127.0.0.1:9042; Rack: rack1 +Created keyspaces. Sleeping 1s for propagation. +Sleeping 2s... +Warming up WRITE with 2500 iterations... 
+Running WRITE with 50 threads for 10000 iteration +type total ops, op/s, pk/s, row/s, mean, med, .95, .99, .999, max, time, stderr, errors, gc: #, max ms, sum ms, sdv ms, mb +total, 10000, 10690, 10690, 10690, 3.7, 2.8, 9.5, 16.7, 28.9, 38.4, 0.9, 0.00000, 0, 0, 0, 0, 0, 0 + + +Results: +Op rate : 10,690 op/s [WRITE: 10,690 op/s] +Partition rate : 10,690 pk/s [WRITE: 10,690 pk/s] +Row rate : 10,690 row/s [WRITE: 10,690 row/s] +Latency mean : 3.7 ms [WRITE: 3.7 ms] +Latency median : 2.8 ms [WRITE: 2.8 ms] +Latency 95th percentile : 9.5 ms [WRITE: 9.5 ms] +Latency 99th percentile : 16.7 ms [WRITE: 16.7 ms] +Latency 99.9th percentile : 28.9 ms [WRITE: 28.9 ms] +Latency max : 38.4 ms [WRITE: 38.4 ms] +Total partitions : 10,000 [WRITE: 10,000] +Total errors : 0 [WRITE: 0] +Total GC count : 0 +Total GC memory : 0 B +Total GC time : 0.0 seconds +Avg GC time : NaN ms +StdDev GC time : 0.0 ms +Total operation time : 00:00:00 + +END +``` + +### Read Test +The following command runs a **read benchmark** on your Cassandra database using `cassandra-stress`. It simulates multiple clients reading from the cluster at the same time and records performance metrics such as **throughput** and **latency**. 
+ +```console +~/cassandra/tools/bin/cassandra-stress read n=10000 -rate threads=50 +``` +You should see output similar to the following: +```output +******************** Stress Settings ******************** +Command: + Type: read + Count: 10,000 + No Warmup: false + Consistency Level: LOCAL_ONE + Target Uncertainty: not applicable + Key Size (bytes): 10 + Counter Increment Distibution: add=fixed(1) +Rate: + Auto: false + Thread Count: 50 + OpsPer Sec: 0 +Population: + Distribution: Gaussian: min=1,max=10000,mean=5000.500000,stdev=1666.500000 + Order: ARBITRARY + Wrap: false +Insert: + Revisits: Uniform: min=1,max=1000000 + Visits: Fixed: key=1 + Row Population Ratio: Ratio: divisor=1.000000;delegate=Fixed: key=1 + Batch Type: not batching +Columns: + Max Columns Per Key: 5 + Column Names: [C0, C1, C2, C3, C4] + Comparator: AsciiType + Timestamp: null + Variable Column Count: false + Slice: false + Size Distribution: Fixed: key=34 + Count Distribution: Fixed: key=5 +Errors: + Ignore: false + Tries: 10 +Log: + No Summary: false + No Settings: false + File: null + Interval Millis: 1000 + Level: NORMAL +Mode: + API: JAVA_DRIVER_NATIVE + Connection Style: CQL_PREPARED + Protocol Version: V5 + Username: null + Password: null + Auth Provide Class: null + Max Pending Per Connection: 128 + Connections Per Host: 8 + Compression: NONE +Node: + Nodes: [localhost] + Is White List: false + Datacenter: null +Schema: + Keyspace: keyspace1 + Replication Strategy: org.apache.cassandra.locator.SimpleStrategy + Replication Strategy Options: {replication_factor=1} + Table Compression: null + Table Compaction Strategy: null + Table Compaction Strategy Options: {} +Transport: + Truststore: null + Truststore Password: null + Keystore: null + Keystore Password: null + SSL Protocol: TLS + SSL Algorithm: null + SSL Ciphers: TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA +Port: + Native Port: 9042 + JMX Port: 7199 +JMX: + Username: null + Password: *not set* +Graph: + File: null + 
Revision: unknown + Title: null + Operation: READ +TokenRange: + Wrap: false + Split Factor: 1 +Credentials file: + File: *not set* + CQL username: *not set* + CQL password: *not set* + JMX username: *not set* + JMX password: *not set* + Transport truststore password: *not set* + Transport keystore password: *not set* +Reporting: + Output frequency: 1s + Header frequency: *not set* + +Sleeping 2s... +Warming up READ with 2500 iterations... +Connected to cluster: Test Cluster, max pending requests per connection 128, max connections per host 8 +Datacenter: datacenter1; Host: localhost/127.0.0.1:9042; Rack: rack1 +Running READ with 50 threads for 10000 iteration +type total ops, op/s, pk/s, row/s, mean, med, .95, .99, .999, max, time, stderr, errors, gc: #, max ms, sum ms, sdv ms, mb +total, 1540, 1540, 1540, 1540, 8.1, 6.2, 19.2, 38.4, 73.3, 80.9, 1.0, 0.00000, 0, 0, 0, 0, 0, 0 +total, 9935, 8395, 8395, 8395, 5.9, 4.2, 16.7, 33.1, 57.3, 86.0, 2.0, 0.48892, 0, 0, 0, 0, 0, 0 +total, 10000, 4217, 4217, 4217, 8.5, 4.2, 27.1, 27.4, 27.4, 27.4, 2.0, 1.89747, 0, 0, 0, 0, 0, 0 + + +Results: +Op rate : 4,962 op/s [READ: 4,962 op/s] +Partition rate : 4,962 pk/s [READ: 4,962 pk/s] +Row rate : 4,962 row/s [READ: 4,962 row/s] +Latency mean : 6.3 ms [READ: 6.3 ms] +Latency median : 4.5 ms [READ: 4.5 ms] +Latency 95th percentile : 17.4 ms [READ: 17.4 ms] +Latency 99th percentile : 33.4 ms [READ: 33.4 ms] +Latency 99.9th percentile : 59.6 ms [READ: 59.6 ms] +Latency max : 86.0 ms [READ: 86.0 ms] +Total partitions : 10,000 [READ: 10,000] +Total errors : 0 [READ: 0] +Total GC count : 0 +Total GC memory : 0 B +Total GC time : 0.0 seconds +Avg GC time : NaN ms +StdDev GC time : 0.0 ms +Total operation time : 00:00:02 + +END +``` + +## Benchmark Results Table Explained: + +- **Op rate (operations per second):** The number of read operations Cassandra successfully executed per second. +- **Partition rate:** Number of partitions read per second. 
Since this is a read test, the partition rate equals the op rate. +- **Row rate:** Number of rows read per second. Again, for this test it equals the op rate. +- **Latency mean:** The average time taken for each read request to complete. +- **Latency median:** The 50th percentile latency — half of the operations completed faster than this time. +- **Latency max:** The slowest single read request during the test. +- **Total partitions:** The total number of partitions read during the test. +- **Total errors:** Number of failed read operations. +- **GC metrics (Garbage Collection):** Shows whether JVM garbage collection paused Cassandra during the test. +- **Total operation time:** The total wall-clock time taken to run the benchmark. + +### Benchmark summary on Arm64 +Results from the earlier run on the `c4a-standard-4` (4 vCPU, 16 GB memory) Arm64 VM in GCP (SuSE shown, Ubuntu results were very similar): + +| Metric | Write Test | Read Test | +|----------------------------|----------------------|----------------------| +| Operation Rate (op/s) | 10,690 | 4,962 | +| Partition Rate (pk/s) | 10,690 | 4,962 | +| Row Rate (row/s) | 10,690 | 4,962 | +| Latency Mean | 3.7 ms | 6.3 ms | +| Latency Median | 2.8 ms | 4.5 ms | +| Latency 95th Percentile | 9.5 ms | 17.4 ms | +| Latency 99th Percentile | 16.7 ms | 33.4 ms | +| Latency 99.9th Percentile | 28.9 ms | 59.6 ms | +| Latency Max | 38.4 ms | 86.0 ms | +| Total Partitions | 10,000 | 10,000 | +| Total Errors | 0 | 0 | +| Total GC Count | 0 | 0 | +| Total GC Memory | 0 B | 0 B | +| Total GC Time | 0.0 s | 0.0 s | +| Total Operation Time | 0:00:00 | 0:00:02 | + +### Cassandra performance benchmarking notes +When examining the benchmark results, you will notice that on the Google Axion C4A Arm-based instances: + +- The write operations achieved a high throughput of **10,690 op/s**, while read operations reached **4,962 op/s** on the `c4a-standard-4` Arm64 VM. 
+- Latency for writes was very low (mean: **3.7 ms**) compared to reads (mean: **6.3 ms**), indicating fast write processing on this Arm64 VM. +- The 95th and 99th percentile latencies show consistent performance, with writes significantly faster than reads. +- There were no errors or GC overhead, confirming stable and reliable benchmarking results. + +Overall, the Arm64 VM provides efficient and predictable performance, making it suitable for high-throughput Cassandra workloads. diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-shell.png b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-shell.png new file mode 100644 index 0000000000..7e2fc3d1b5 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-shell.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-ssh.png b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-ssh.png new file mode 100644 index 0000000000..597ccd7fea Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-ssh.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-vm.png b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-vm.png new file mode 100644 index 0000000000..0d1072e20d Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/images/gcp-vm.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/installation.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/installation.md new file mode 100644 index 0000000000..a90264da85 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/installation.md @@ -0,0 +1,77 @@ +--- +title: Install Apache Cassandra +weight: 4 + +### FIXED, DO NOT 
MODIFY +layout: learningpathall +--- + +## Apache Cassandra Installation on Ubuntu or SuSE VM +This guide will help you install **Apache Cassandra** on a Ubuntu or SuSE Linux virtual machine. Cassandra is a highly scalable NoSQL database designed for high availability and fault tolerance. + +### Update System Packages +Updating system packages ensures that your system has the latest security patches and dependencies required for Cassandra. + +{{< tabpane code=true >}} + {{< tab header="Ubuntu" language="bash">}} +sudo apt update + {{< /tab >}} + {{< tab header="SUSE Linux" language="bash">}} +sudo zypper refresh +sudo zypper update -y + {{< /tab >}} +{{< /tabpane >}} + +### Install Java +Cassandra requires a Java runtime environment. You can use either Java 11 or Java 17. This example uses Java 17 for optimal performance and compatibility with Cassandra 5.0.5. + +{{< tabpane code=true >}} + {{< tab header="Ubuntu" language="bash">}} +sudo apt install -y openjdk-17-jdk + {{< /tab >}} + {{< tab header="SUSE Linux" language="bash">}} +sudo zypper install -y java-17-openjdk java-17-openjdk-devel + {{< /tab >}} +{{< /tabpane >}} + +### Download Cassandra +Download the latest stable release of Apache Cassandra 5.0.5 from the official Apache repository. + +```console +wget https://downloads.apache.org/cassandra/5.0.5/apache-cassandra-5.0.5-bin.tar.gz +``` +{{% notice Note %}} +Apache Cassandra 5.0 is a major release introducing significant performance, usability, and scalability enhancements. Key features include Storage Attached Indexes (SAI) for flexible querying, Trie-based memtables/SSTables for better efficiency, and the Unified Compaction Strategy (UCS) for automated data management. It also supports JDK 17 for up to 20% performance gains and adds vector search for AI applications. The release marks the end-of-life for the 3.x series, urging users to upgrade for continued support. 
+You can view [the release announcement](https://cassandra.apache.org/_/blog/Apache-Cassandra-5.0-Announcement.html) for more details. + +The [Arm Ecosystem Dashboard](https://developer.arm.com/ecosystem-dashboard/) lists Apache Cassandra version 5.0.0 as the minimum recommended version on Arm platforms. +{{% /notice %}} + +### Extract and Setup Cassandra +Extract the downloaded archive and move it to a dedicated directory for Cassandra. + +```console +tar -xvzf apache-cassandra-5.0.5-bin.tar.gz +mv apache-cassandra-5.0.5 ~/cassandra +``` + +### Enable Running Cassandra from Anywhere +To run Cassandra commands from any location, add the `bin` directory to your PATH environment variable: + +```console +echo 'export PATH="$HOME/cassandra/bin:$PATH"' >> ~/.bashrc +source ~/.bashrc +``` +Now you can run `cassandra` or `cqlsh` from any terminal without specifying the full path. + +### Verify Installation +Check the installed Cassandra version to confirm the installation: + +```console +cassandra -v +``` +You should see an output similar to: +```output +5.0.5 +``` +Cassandra's installation is complete. You can now proceed with the baseline testing. diff --git a/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/instance.md b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/instance.md new file mode 100644 index 0000000000..bd8a3d9b32 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/cassandra-on-gcp/instance.md @@ -0,0 +1,96 @@ +--- +title: Create a Google Axion C4A Arm virtual machine on GCP +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Overview + +In this section, you will learn how to provision a Google Axion C4A Arm virtual machine on Google Cloud Platform (GCP) using the `c4a-standard-4` (4 vCPUs, 16 GB memory) machine type in the Google Cloud Console.
+
+{{% notice Note %}}
+For support on GCP setup, see the Learning Path [Getting started with Google Cloud Platform](https://learn.arm.com/learning-paths/servers-and-cloud-computing/csp/google/).
+{{% /notice %}}
+
+## Provision a Google Axion C4A Arm VM in Google Cloud Console
+
+To create a virtual machine based on the C4A instance type:
+- Navigate to the [Google Cloud Console](https://console.cloud.google.com/).
+- Go to **Compute Engine > VM Instances** and select **Create Instance**.
+- Under **Machine configuration**:
+  - Populate fields such as **Instance name**, **Region**, and **Zone**.
+  - Set **Series** to `C4A`.
+  - Select `c4a-standard-4` for machine type.
+
+  ![Create a Google Axion C4A Arm virtual machine in the Google Cloud Console with c4a-standard-4 selected alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console")
+
+
+- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server** or **Ubuntu**.
+  - If using **SUSE Linux Enterprise Server**, select "Pay As You Go" for the license type.
+  - If using **Ubuntu**, under the **Version** tab, scroll down and select the aarch64 version of **Ubuntu 22.04 LTS**.
+- After making your selections, click **Select**.
+- Under **Networking**, enable **Allow HTTP traffic**.
+- Click **Create** to launch the instance.
+- Once created, you should see an "SSH" option to the right in your list of VM instances.
Click on this to launch an SSH shell into your VM instance:
+
+![Invoke an SSH session via your browser alt-text#center](images/gcp-ssh.png "Invoke an SSH session into your running VM instance")
+
+- A window from your browser should come up and you should now see a shell into your VM instance:
+
+![Terminal Shell in your VM instance alt-text#center](images/gcp-shell.png "Terminal shell in your VM instance")
+
+## Explore your instance
+
+### Run uname
+
+Use the [uname](https://en.wikipedia.org/wiki/Uname) utility to verify that you are using an Arm-based server. For example:
+
+```console
+uname -m
+```
+will identify the host machine as `aarch64`.
+
+### Run hello world
+
+Install the `gcc` compiler:
+
+{{< tabpane code=true >}}
+  {{< tab header="Ubuntu" language="bash">}}
+sudo apt update
+sudo apt install -y build-essential
+  {{< /tab >}}
+  {{< tab header="SUSE Linux" language="bash">}}
+sudo zypper refresh
+sudo zypper install -y gcc
+  {{< /tab >}}
+{{< /tabpane >}}
+
+Using a text editor of your choice, create a file named `hello.c` with the contents below:
+
+```C
+#include <stdio.h>
+int main(){
+  printf("hello world\n");
+  return 0;
+}
+```
+Build and run the application:
+
+```console
+gcc hello.c -o hello
+./hello
+```
+
+The output is shown below:
+
+```output
+hello world
+```
+
+## Automating Arm Based Infrastructure Deployment
+
+Cloud infrastructure deployment is typically done via Infrastructure as code (IaC) automation tools. There are Cloud Service Provider specific tools like [Google Cloud Deployment Manager](https://cloud.google.com/deployment-manager/docs/).
+
+There are also Cloud Service Provider agnostic tools like [Terraform](https://www.terraform.io/). There is a [deploying Arm VMs on Google Cloud Platform (GCP) using Terraform Learning Path](/learning-paths/servers-and-cloud-computing/gcp) that should be reviewed next.
\ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_index.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_index.md new file mode 100644 index 0000000000..264a2b50d8 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_index.md @@ -0,0 +1,76 @@ +--- +title: CircleCI Arm Native Workflows on SUSE Arm (GCP VM) + +draft: true +cascade: + draft: true + +minutes_to_complete: 45 + +who_is_this_for: This learning path is intended for software developers and DevOps engineers looking to set up and run CircleCI Arm native workflows on SUSE Linux Arm64 VMs, specifically on Google Cloud C4A with Axion processors, using self-hosted runners. + +learning_objectives: + - Provision a SUSE Arm64 virtual machine on Google Cloud (C4A with Axion processors) + - Install and configure CircleCI self-hosted machine runners on Arm64 + - Create a cloud-native Node.js demo app to run on the self-hosted Arm runner + - Write and execute a CircleCI workflow using a custom Arm resource class + - Test CircleCI workflows locally and understand job execution on Arm64 runners + +prerequisites: + - A [Google Cloud Platform (GCP)](https://cloud.google.com/free) account with billing enabled + - Basic familiarity with Linux command line, Node.js, and npm + - Basic understanding of CircleCI concepts such as + [workflows](https://circleci.com/docs/guides/orchestrate/workflows/), + [jobs](https://circleci.com/docs/guides/orchestrate/jobs-steps/), + [resource classes](https://circleci.com/docs/guides/execution-managed/resource-class-overview/), and + [runners](https://circleci.com/docs/guides/execution-runner/runner-overview/) + + +author: Pareena Verma + +##### Tags +skilllevels: Introductory +subjects: CI-CD +cloud_service_providers: Google Cloud + +armips: + - Neoverse + +tools_software_languages: + - CircleCI + - Node.js + - npm + - Express + - Docker + +operatingsystems: + - Linux + +# 
================================================================================ +# FIXED, DO NOT MODIFY +# ================================================================================ +further_reading: + - resource: + title: Google Cloud documentation + link: https://cloud.google.com/docs + type: documentation + + - resource: + title: CircleCI Self-Hosted Runner Documentation + link: https://circleci.com/docs/guides/execution-runner/install-machine-runner-3-on-linux/ + type: documentation + + - resource: + title: CircleCI CLI Documentation + link: https://circleci.com/docs/guides/toolkit/local-cli/ + type: documentation + + - resource: + title: Node.js Express Documentation + link: https://expressjs.com/ + type: documentation + +weight: 1 +layout: "learningpathall" +learning_path_main_page: "yes" +--- diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. 
+--- diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/background.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/background.md new file mode 100644 index 0000000000..fa99445bc3 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/background.md @@ -0,0 +1,25 @@ +--- +title: Getting started with CircleCI on Google Axion C4A (Arm Neoverse-V2) + +weight: 2 + +layout: "learningpathall" +--- + +## Google Axion C4A Arm instances in Google Cloud + +Google Axion C4A is a family of Arm-based virtual machines built on Google’s custom Axion CPU, which is based on Arm Neoverse-V2 cores. Designed for high-performance and energy-efficient computing, these virtual machines offer strong performance for modern cloud workloads such as CI/CD pipelines, microservices, media processing, and general-purpose applications. + +The C4A series provides a cost-effective alternative to x86 virtual machines while leveraging the scalability and performance benefits of the Arm architecture in Google Cloud. + +To learn more about Google Axion, refer to the [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu) blog. + +## CircleCI + +CircleCI is a cloud-based **Continuous Integration and Continuous Delivery (CI/CD)** platform that automates the process of **building, testing, and deploying software**. + +It integrates with popular version control systems like **GitHub**, **Bitbucket**, and **GitLab**, and allows developers to define custom workflows in a `.circleci/config.yml` file using **YAML syntax**. + +CircleCI supports multiple environments, including **Docker**, **Linux**, **macOS**, and **Windows**, and offers advanced features like **parallelism**, **caching**, and **matrix builds** to speed up pipelines and improve efficiency. 
+ +It is widely used for **automating tests, running builds, deploying applications, and ensuring code quality** in modern development workflows. Learn more from the [CircleCI official website](https://circleci.com/) and its [documentation](https://circleci.com/docs/). diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/circleci-arm64-cloud-demo.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/circleci-arm64-cloud-demo.md new file mode 100644 index 0000000000..780b8d1e96 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/circleci-arm64-cloud-demo.md @@ -0,0 +1,214 @@ +--- +title: CircleCI Arm64 Cloud-Native Demo +weight: 8 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Deploying a Cloud-Native Arm64 Node.js App using self-hosted CircleCI Runner on GCP + +This guide walks through building and testing a simple **Node.js web app** using a **self-hosted CircleCI Arm64 runner** on a **GCP SUSE Arm64 VM**. + + +### Install and Configure Docker +Ensure Docker is installed, started, and accessible by both your user and the CircleCI runner service. + +- **Install Docker**: Refresh your package manager and install Docker on your system. +- **Enable Docker Service**: Ensure Docker starts on boot and is running. +- **Add User to Docker Group**: Add both your user and the CircleCI runner to the Docker group to grant access. + +```console +sudo zypper refresh +sudo zypper install docker +sudo systemctl enable docker +sudo systemctl start docker +sudo systemctl status docker +sudo usermod -aG docker $USER +sudo usermod -aG docker circleci +``` +### Validate Docker access +This command switches to the CircleCI user and checks if Docker is working correctly. + +```console +sudo -u circleci -i +docker ps +exit +``` + +### Verify Docker Permissions +Check Docker socket permissions and ensure that the CircleCI runner is active and running. 
+
+```console
+ls -l /var/run/docker.sock
+ps -aux | grep circleci-runner
+```
+- **Check Docker Socket Permissions**: This command ensures the Docker socket is accessible.
+- **Verify CircleCI Runner Process**: Confirm the CircleCI runner service is active and running.
+
+### **Install Node.js and npm**
+
+Before proceeding with the app setup, please make sure **Node.js** and **npm** (Node.js package manager) are installed on the VM, as they are required to run your Node.js app.
+
+- **Install Node.js**: Use the official Node.js package for Arm64 architecture.
+- **Install npm**: npm is automatically installed when Node.js is installed.
+
+```console
+sudo zypper install nodejs
+sudo zypper install npm
+```
+### Clone Your App Repository
+Clone your application repository (or create one locally), replacing `<your-username>` with your GitHub username:
+
+```console
+git clone https://github.com/<your-username>/arm64-node-demo.git
+cd arm64-node-demo
+```
+
+### Create a Dockerfile
+In the root of your project, create a `Dockerfile` that defines how to build and run your application container.
+
+```dockerfile
+# Dockerfile
+FROM arm64v8/node:20-alpine
+WORKDIR /app
+COPY package*.json ./
+RUN npm install
+COPY . .
+EXPOSE 3000
+CMD ["npm", "start"]
+```
+- **Use Arm64 Node.js Image**: The `arm64v8/node` image is specifically designed for Arm64 architecture.
+- **Install Dependencies**: `RUN npm install` installs the project dependencies listed in `package.json`.
+- **Expose Port**: The app will run on port 3000.
+- **Start the App**: The container will execute `npm start` to launch the Node.js server.
+
+### Add a CircleCI Configuration
+Create a `.circleci/config.yml` file to define the CircleCI pipeline for building and testing your Node.js app on Arm64 architecture.
+
+```yaml
+version: 2.1
+
+jobs:
+  arm64-demo:
+    machine: true
+    resource_class: <your-namespace>/<resource-class-name>
+    steps:
+      - checkout
+      - run:
+          name: Show Architecture
+          command: |
+            ARCH=$(uname -m)
+            echo "Detected architecture: $ARCH"
+            if [ "$ARCH" = "aarch64" ]; then
+              echo "✅ Running on ARM64 architecture!"
+            else
+              echo "Not running on ARM64!"
+              exit 1
+            fi
+      - run:
+          name: Build Docker Image
+          command: docker build -t arm64-node-demo .
+      - run:
+          name: Run Docker Container
+          command: docker run -d -p 3000:3000 arm64-node-demo
+      - run:
+          name: Test Endpoint
+          command: |
+            sleep 5
+            curl http://localhost:3000
+
+workflows:
+  version: 2
+  arm64-workflow:
+    jobs:
+      - arm64-demo
+```
+- **arm64-demo Job**: This job checks if the architecture is Arm64, builds the Docker image, runs it in a container, and tests the app endpoint.
+- **resource_class**: Specify the resource class for the CircleCI runner in `namespace/name` form (for example, the custom Arm64 resource class you created for your self-hosted runner).
+- **Test Endpoint**: The job sends a request to the app to verify it’s working.
+
+### Node.js Application
+Here’s the basic code for the Node.js app.
+
+`index.js`:
+
+```javascript
+const express = require('express');
+const app = express();
+const PORT = process.env.PORT || 3000;
+
+app.get('/', (req, res) => {
+  res.send('Hello from ARM64 Node.js app! 🚀');
+});
+
+app.listen(PORT, () => {
+  console.log(`Server running on port ${PORT}`);
+});
+```
+package.json
+
+```json
+{
+  "name": "arm64-node-demo",
+  "version": "1.0.0",
+  "main": "index.js",
+  "scripts": {
+    "start": "node index.js",
+    "test": "echo \"No tests yet\""
+  },
+  "dependencies": {
+    "express": "^4.18.2"
+  }
+}
+```
+- **Express Server**: The application uses Express.js to handle HTTP requests and respond with a simple message.
+- **Package Dependencies**: The app requires the `express` package for handling HTTP requests.
+
+### Push Code to GitHub
+
+Once all files (`Dockerfile`, `index.js`, `package.json`, `.circleci/config.yml`) are ready, push your project to GitHub so CircleCI can build it automatically.
+
+```console
+git add .
+git commit -m "Add ARM64 CircleCI Node.js demo project"
+git push -u origin main
+```
+- **Add and Commit Changes**: Stage and commit your project files.
+- **Push to GitHub**: Push your code to the GitHub repository so that CircleCI can trigger the build.
+
+### Start CircleCI Runner and Execute Job
+Ensure that your CircleCI runner is enabled and started. This will allow your self-hosted runner to pick up jobs from CircleCI.
+
+```console
+sudo systemctl enable circleci-runner
+sudo systemctl start circleci-runner
+sudo systemctl status circleci-runner
+```
+- **Enable CircleCI Runner**: Ensure the CircleCI runner is set to start automatically on boot.
+- **Start and Check Status**: Start the CircleCI runner and verify it is running.
+
+After pushing your code to GitHub, open your **CircleCI Dashboard → Projects**, and confirm that your **ARM64 workflow** starts running using your **self-hosted runner**.
+
+If the setup is correct, you’ll see your job running under the resource class you created.
+
+### Output
+Once the job starts running, CircleCI will:
+
+- Detect the ARM64 architecture.
+
+![CircleCI Dashboard alt-text#center](images/output1.png "Figure 1: Show architecture")
+
+- Build the Docker image.
+
+![CircleCI Dashboard alt-text#center](images/output2.png "Figure 2: Docker Image")
+
+- Run a container from that image.
+
+![CircleCI Dashboard alt-text#center](images/output4.png "Figure 3: Container Run")
+
+- Test the application by hitting the endpoint.
+
+![CircleCI Dashboard alt-text#center](images/output3.png "Figure 4: Verify App")
+
+If successful, you will see your CircleCI job running and the app deployed in the CircleCI Dashboard.
diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/create_resource_class.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/create_resource_class.md new file mode 100644 index 0000000000..6959ce5144 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/create_resource_class.md @@ -0,0 +1,43 @@ +--- +title: Create Resource Class in CircleCI +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Create a Resource Class for Self-Hosted Runner in CircleCI +This guide explains how to create a **Resource Class** in the **CircleCI Web Dashboard** for a **self-hosted runner**. +A Resource Class defines a unique identifier for your runner and links it to your CircleCI namespace, allowing CircleCI jobs to target your custom machine environment. + +### Steps + +1. **Go to the CircleCI Web Dashboard** + - From the left sidebar, navigate to **Self-Hosted Runners**. + - You’ll see a screen asking you to accept the **terms of use**. + - **Check the box** that says **“Yes, I agree to the terms”** to enable runners. + - Then click **Self-Hosted Runners** to continue setup. + +![Self-Hosted Runners alt-text#center](images/shrunner0.png "Figure 1: Self-Hosted Runners ") + +2. **Create a New Resource Class** + +Click **Create Resource Class** on your CircleCI dashboard. + +**Fill in the following details:** + + - **Namespace:** Your CircleCI username or organization name (e.g., `circleci`) + - **Resource Class Name:** A clear, descriptive name for your runner (e.g., `arm64`) + - Click **Create Resource Class**. + +![Self-Hosted Runners alt-text#center](images/shrunner1.png "Figure 2: Create Resource Class ") + +![Self-Hosted Runners alt-text#center](images/shrunner2.png "Figure 3: Details Resource Class & Namespace") + +3. **Save and Copy the Token** + - Once created, CircleCI will generate a **Resource Class Token**. 
+ - Copy this token and store it securely — you will need it to register your runner on the GCP VM. + +![Self-Hosted Runners alt-text#center](images/shrunner3.png "Figure 4: Resource Class Token") + +Now that your resource class and token are generated, proceed to the next section to set up the CircleCI self-hosted runner. diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/dashboard.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/dashboard.png new file mode 100644 index 0000000000..a618667515 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/dashboard.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/gcp-vm.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/gcp-vm.png new file mode 100644 index 0000000000..0d1072e20d Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/gcp-vm.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output1.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output1.png new file mode 100644 index 0000000000..c1a1859936 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output1.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output2.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output2.png new file mode 100644 index 0000000000..987621744e Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output2.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output3.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output3.png new file mode 100644 index 0000000000..ff0aab2a75 Binary files /dev/null and 
b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output3.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output4.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output4.png new file mode 100644 index 0000000000..afd4e4c2f3 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/output4.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner0.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner0.png new file mode 100644 index 0000000000..927a193982 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner0.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner1.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner1.png new file mode 100644 index 0000000000..bb587d2723 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner1.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner2.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner2.png new file mode 100644 index 0000000000..f85cc7a55a Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner2.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner3.png b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner3.png new file mode 100644 index 0000000000..b1362dab6c Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/images/shrunner3.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install-circleci-cli.md 
b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install-circleci-cli.md new file mode 100644 index 0000000000..dbf2afdfa5 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install-circleci-cli.md @@ -0,0 +1,41 @@ +--- +title: Install CircleCI CLI +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Install CircleCI CLI on GCP VM +This guide explains how to install the **CircleCI Command Line Interface (CLI)** on a **GCP SUSE Arm64 virtual machine**. +The CLI allows you to interact with CircleCI directly from your terminal, such as to validate configuration files, run jobs locally, or manage runners. + +### Install Required Packages +Before installing the CircleCI CLI, make sure your system has the basic tools required for downloading and extracting files. + +```console +sudo zypper install curl tar gzip coreutils gpg git +``` + +## Download and Extract the CircleCI CLI +Now download the CircleCI CLI binary for Linux Arm64 and extract it. + +```console +curl -fLSs https://github.com/CircleCI-Public/circleci-cli/releases/download/v0.1.33494/circleci-cli_0.1.33494_linux_arm64.tar.gz | tar xz +sudo mv circleci-cli_0.1.33494_linux_arm64/circleci /usr/local/bin/ +``` +- The `curl` command downloads the `.tar.gz` archive from the official CircleCI GitHub release page. +- The `| tar xz` part extracts the downloaded file directly without saving it separately. +- After extraction, you’ll see a new folder named `circleci-cli_0.1.33494_linux_arm64` in your current directory. + +### Verify the Installation +Finally, verify that the CLI is installed correctly by checking its version. + +```console +circleci version +``` +You should see an output similar to: +```output +0.1.33494+7cc6570 (release) +``` +If you see similar version output, the installation was successful! 
diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install_circleci_runner.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install_circleci_runner.md new file mode 100644 index 0000000000..dbacd91960 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/install_circleci_runner.md @@ -0,0 +1,90 @@ +--- +title: Install CircleCI Machine Runner on SUSE Arm +weight: 6 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Install CircleCI Machine Runner on SUSE Arm64 + +This guide explains how to install and configure the **CircleCI Machine Runner** on a **GCP SUSE Arm64 virtual machine**. +This setup allows your self-hosted environment to execute CircleCI jobs targeting Arm64 architecture. + +### Add CircleCI Package Repository + +SUSE is an RPM-based distribution, so first add the official CircleCI repository: + +```console +curl -s https://packagecloud.io/install/repositories/circleci/runner/script.rpm.sh?any=true | sudo bash +``` + +### Install the CircleCI Runner +Install the pre-built CircleCI runner package: + +```console +sudo zypper install -y circleci-runner +``` +### Prepare User and Permissions +Before starting the runner, ensure the required user, group, and directory permissions are properly set up: + +```console +# Create a symlink for adduser (required on SUSE) +sudo ln -s /usr/sbin/useradd /usr/sbin/adduser + +# Create CircleCI system user and group +sudo useradd -m -r circleci +sudo groupadd --system circleci + +# Set up CircleCI directories and permissions +sudo mkdir -p /var/lib/circleci +sudo chown -R circleci:circleci /var/lib/circleci +sudo chown -R circleci:circleci /etc/circleci-runner + +# Reload systemd and restart the runner service +sudo systemctl daemon-reload +sudo systemctl restart circleci-runner + +# Verify service status +sudo systemctl status circleci-runner +``` + +### Configure the Runner Token + +Replace the authentication token in the runner 
configuration file.
+Use the token obtained from your Resource Class in the CircleCI Dashboard, replacing `<your-auth-token>` with its value:
+
+```console
+export RUNNER_AUTH_TOKEN="<your-auth-token>"
+sudo sed -i "s/<< AUTH_TOKEN >>/$RUNNER_AUTH_TOKEN/g" /etc/circleci-runner/circleci-runner-config.yaml
+```
+
+### Enable and Start the Runner
+Enable the CircleCI runner service to start automatically and verify it’s running:
+
+```console
+sudo systemctl enable circleci-runner
+sudo systemctl start circleci-runner
+sudo systemctl status circleci-runner
+```
+
+If the status shows active (running), your runner is successfully installed and connected to CircleCI.
+
+```output
+● circleci-runner.service - Run the CircleCI self-hosted runner agent
+     Loaded: loaded (/usr/lib/systemd/system/circleci-runner.service; enabled; vendor preset: disabled)
+     Active: active (running) since Thu 2025-10-09 08:59:40 UTC; 2h 29min ago
+   Main PID: 10150 (circleci-runner)
+      Tasks: 9
+        CPU: 1.524s
+     CGroup: /system.slice/circleci-runner.service
+             └─ 10150 /usr/bin/circleci-runner machine -c /etc/circleci-runner/circleci-runner-config.yaml
+
+Oct 09 11:12:11 lpprojectsusearm64 circleci-runner[10150]: 11:12:11 7927c 72.264ms worker loop: claim: app.backoff_ms=5000 a>
+Oct 09 11:15:03 lpprojectsusearm64 circleci-runner[10150]: 11:15:03 6f109 46.059ms POST /api/v3/runner/claim app.loop_name=cl>
+Oct 09 11:15:03 lpprojectsusearm64 circleci-runner[10150]: 11:15:03 6f109 46.119ms claim app.loop_name=claim: mode=agent res>
+Oct 09 11:15:03 lpprojectsusearm64 circleci-runner[10150]: 11:15:03 6f109 46.144ms worker loop: claim: app.backoff_ms=5000 a>
+```
+Also, you can verify it from the dashboard:
+
+![Self-Hosted Runners alt-text#center](images/dashboard.png "Figure 1: Self-Hosted Runners ")
diff --git a/content/learning-paths/servers-and-cloud-computing/circleci-gcp/instance.md b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/instance.md
new file mode 100644
index 0000000000..dbacd91960
--- /dev/null
+++
b/content/learning-paths/servers-and-cloud-computing/circleci-gcp/instance.md @@ -0,0 +1,31 @@ +--- +title: Create a Google Axion C4A Arm virtual machine on GCP +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Overview + +In this section, you will learn how to provision a Google Axion C4A Arm virtual machine on Google Cloud Platform (GCP) using the `c4a-standard-4` (4 vCPUs, 16 GB memory) machine type in the Google Cloud Console. + +{{% notice Note %}} +For support on GCP setup, see the Learning Path [Getting started with Google Cloud Platform](https://learn.arm.com/learning-paths/servers-and-cloud-computing/csp/google/). +{{% /notice %}} + +## Provision a Google Axion C4A Arm VM in Google Cloud Console + +To create a virtual machine based on the C4A instance type: +- Navigate to the [Google Cloud Console](https://console.cloud.google.com/). +- Go to **Compute Engine > VM Instances** and select **Create Instance**. +- Under **Machine configuration**: + - Populate fields such as **Instance name**, **Region**, and **Zone**. + - Set **Series** to `C4A`. + - Select `c4a-standard-4` for machine type. + + ![Create a Google Axion C4A Arm virtual machine in the Google Cloud Console with c4a-standard-4 selected alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console") + +- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server**. Pick the preferred version for your Operating System. Ensure you select the **Arm image** variant. Click **Select**. +- Under **Networking**, enable **Allow HTTP traffic**. +- Click **Create** to launch the instance. 
diff --git a/content/learning-paths/servers-and-cloud-computing/codec/_index.md b/content/learning-paths/servers-and-cloud-computing/codec/_index.md index 7511d082a6..7241132bc4 100644 --- a/content/learning-paths/servers-and-cloud-computing/codec/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/codec/_index.md @@ -56,5 +56,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/csp/google.md b/content/learning-paths/servers-and-cloud-computing/csp/google.md index 343c417965..7f6fc058a8 100644 --- a/content/learning-paths/servers-and-cloud-computing/csp/google.md +++ b/content/learning-paths/servers-and-cloud-computing/csp/google.md @@ -1,113 +1,45 @@ --- -# User change -title: "Getting Started with Google Cloud Platform" +title: Getting Started with Google Cloud Platform +weight: 4 -weight: 4 # 1 is first, 2 is second, etc. - -# Do not modify these elements -layout: "learningpathall" +### FIXED, DO NOT MODIFY +layout: learningpathall --- -[Google Cloud](https://cloud.google.com/) is a public cloud computing platform. - -As with most cloud service providers, Google Cloud offers a pay-as-you-use [pricing policy](https://cloud.google.com/pricing), including a number of [free](https://cloud.google.com/free/docs/free-cloud-features) services. - -This section is to help you get started with [Google Cloud Compute Engine](https://cloud.google.com/compute) compute services, using Arm-based Virtual Machines. 
Google Cloud offers two generations of Arm-based VMs, `C4A` is the latest generation based on [Google Axion](https://cloud.google.com/products/axion), Google's first Arm-based server processor, built using the Armv9 Neoverse V2 CPU. The previous generation VMs are based on Ampere Altra processor and part of [Tau T2A](https://cloud.google.com/tau-vm) family of Virtual Machines. - -Detailed instructions are available in the Google Cloud [documentation](https://cloud.google.com/compute/docs/instances). - -## Create an account - -Before you begin, create an account. For a personal account, click on [Get started for free](https://cloud.google.com/), and follow the on-screen instructions to register. You can use an existing Google account if you have one. - -If using an organization's account, you will likely need to consult with your internal administrator. See [this guide](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) for additional information. - -## Browse for an appropriate instance - -Google Cloud offers a wide range of instance types, covering all performance (and pricing) points. For an overview of the `C4A` instance types, see the [General-purpose machine family](https://cloud.google.com/compute/docs/general-purpose-machines#c4a_series). Similarly, to know more about the `T2A` instance types, see this [page](https://cloud.google.com/compute/docs/general-purpose-machines#t2a_machines). - -Also note which [regions](https://cloud.google.com/compute/docs/regions-zones#available) these servers are available in. - -## Create your Compute Engine instance - -The easiest way to launch your instance is via the [Google Cloud Console](https://console.cloud.google.com). Activities can be separated by `Project`. By default, when you first login, you will be in `My First Project`. If you wish to rename this, navigate to `IAM & Admin` > `Settings`, and rename. 
You can also create new project(s) from the pull-down menu, or [directly](https://console.cloud.google.com/projectcreate). - -Select `Compute Engine` from the `Quick access` section, if shown. Else navigate to `Compute Engine` > `VM instances`. If it is your first time, you will be prompted to enable `Compute Engine API`. Go to the [VM instances](https://console.cloud.google.com/compute/instances) area of the console. - -Click the `CREATE INSTANCE` button. - -![google1 #center](https://github.com/ArmDeveloperEcosystem/arm-learning-paths/assets/71631645/4db59b89-2d9b-453a-8f9c-52780f4f134e "Create a VM instance") - -### Name your instance - -Give your instance a meaningful, but arbitrary, name. This is particularly useful when creating multiple instances. You can optionally add [labels](https://cloud.google.com/resource-manager/docs/creating-managing-labels) as additional identifiers. - -![google2 #center](https://github.com/ArmDeveloperEcosystem/arm-learning-paths/assets/71631645/e81f1cb9-8d62-471a-be56-6d6a1da64e46 "Specify a name for the instance and optionally add labels") - -### Select Region and Zone for your instance. - -Select an appropriate `region` and `zone` that support Arm-based servers. - -![google3 #center](https://github.com/ArmDeveloperEcosystem/arm-learning-paths/assets/71631645/f2a19cd0-7565-44d3-9e6f-b27bccad3e86 "Select an appropriate region and zone") - -To view the latest information on which available regions and zones support Arm-based servers, see the [Compute Engine documentation](https://cloud.google.com/compute/docs/regions-zones#available). To filter for Arm-based machines, click on `Select a machine type`, then select `C4A` or `T2A` from the pull-down menu. - -![google4 #center](images/axion-series.png "Check which regions and zones support Arm-based machines") - -### Machine configuration -Select `C4A` from the `Series` pull-down menu. Then select an appropriate `Machine type` configuration for your needs. 
+## Overview -![google5 #center](images/axion-instance.png "Select an appropriate C4A machine type") +In this section, you will learn how to provision a Google Axion C4A Arm virtual machine on Google Cloud Platform (GCP) using the `c4a-standard-4` (4 vCPUs, 16 GB memory) machine type in the Google Cloud Console. -### Boot disk configuration - -Click the `CHANGE` button if you wish to change the virtual disk size, or the operating system or version, for example to `Ubuntu 24.04 LTS`. Be sure to select Arm compatible image. - -![alt-text #center](https://user-images.githubusercontent.com/67620689/204448755-f1259724-a386-4dc3-9b88-8ece7057d4de.PNG "Edit boot disk configuration if necessary") - -## Security and SSH key pair - -By default, you can access your instance via the browser. If you wish to use an SSH terminal, you must [create](https://cloud.google.com/compute/docs/connect/create-ssh-keys) and [add](https://cloud.google.com/compute/docs/connect/add-ssh-keys) an appropriate SSH key pair. - -![alt-text #center](https://user-images.githubusercontent.com/67620689/225616099-8fc7791a-24b3-4195-b957-154eaca43080.PNG "Add or create an SSH key pair to access the VM") - -### Other options - -Other options, such as `Confidential VM service`, can optionally be enabled. For now, leave as default (disabled). See the Google Cloud documentation for an explanation of these configurations. - -When satisfied, click `CREATE`. After a few moments the instance will be available and listed in your [console](https://console.cloud.google.com/compute/instances). - -![google8 #center](https://github.com/ArmDeveloperEcosystem/arm-learning-paths/assets/71631645/0a9bee8f-cd4c-478c-b8d1-85db99d8ef3a "Create the VM instance") - -## Connect to your instance - -You can interact with your instance via the browser (SSH-in-browser) or via an SSH terminal application. 
- -### SSH-in-browser Connect +{{% notice Note %}} +For support on GCP setup, see the Learning Path [Getting started with Google Cloud Platform](https://learn.arm.com/learning-paths/servers-and-cloud-computing/csp/google/). +{{% /notice %}} -Once running, the IP address will be displayed, and you are able to connect to the instance. +## Provision a Google Axion C4A Arm VM in Google Cloud Console -Select `Open in browser window` from the `SSH` pull-down to open an SSH shell directly. +To create a virtual machine based on the C4A instance type: +- Navigate to the [Google Cloud Console](https://console.cloud.google.com/). +- Go to **Compute Engine > VM Instances** and select **Create Instance**. +- Under **Machine configuration**: + - Populate fields such as **Instance name**, **Region**, and **Zone**. + - Set **Series** to `C4A`. + - Select `c4a-standard-4` for machine type. -![alt-text #center](https://user-images.githubusercontent.com/97123064/244489641-3069f9b4-4d41-464b-9cd0-f2db55195c8b.png "Open SSH-in-browser through the Google Cloud console") + ![Create a Google Axion C4A Arm virtual machine in the Google Cloud Console with c4a-standard-4 selected alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console") -Once connected, you are now ready to use your instance. -### SSH client Connect +- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server** or **Ubuntu**. + - If using **SUSE Linux Enterprise Server**, select "Pay As You Go" for the license type. + - If using **Ubuntu**, under the **Version** tab, scroll down and select the aarch64 version of **Ubuntu 22.04 LTS**. +- After making your selection, click **Select**. +- Under **Networking**, enable **Allow HTTP traffic**. +- Click **Create** to launch the instance. +- Once created, you should see an "SSH" option to the right in your list of VM instances.
Click on this to launch an SSH shell into your VM instance: -If an SSH key pair was set, connect to the instance with your preferred SSH client. For example, if using `ubuntu` image: +![Invoke an SSH session via your browser alt-text#center](images/gcp-ssh.png "Invoke an SSH session into your running VM instance") -```console -ssh -i ubuntu@ -``` +- A window from your browser should come up and you should now see a shell into your VM instance:
-There are also Cloud Service Provider agnostic tools like [Terraform](https://www.terraform.io/).There is a [deploying Arm VMs on (GCP) using Terraform learning path](/learning-paths/servers-and-cloud-computing/gcp) that should be reviewed next. +There are also Cloud Service Provider agnostic tools like [Terraform](https://www.terraform.io/). There is a [deploying Arm VMs on (GCP) using Terraform learning path](/learning-paths/servers-and-cloud-computing/gcp) that should be reviewed next. \ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-shell.png b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-shell.png new file mode 100644 index 0000000000..7e2fc3d1b5 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-shell.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-ssh.png b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-ssh.png new file mode 100644 index 0000000000..597ccd7fea Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-ssh.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-vm.png b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-vm.png new file mode 100644 index 0000000000..0d1072e20d Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/csp/images/gcp-vm.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/flink/_index.md b/content/learning-paths/servers-and-cloud-computing/flink/_index.md index 9822df5ab5..c872e5ab76 100644 --- a/content/learning-paths/servers-and-cloud-computing/flink/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/flink/_index.md @@ -23,9 +23,7 @@ cloud_service_providers: AWS armips: - Neoverse -layout: learningpathall -learning_path_main_page: 'yes' operatingsystems: - Linux diff --git
a/content/learning-paths/servers-and-cloud-computing/glibc-with-lse/_index.md b/content/learning-paths/servers-and-cloud-computing/glibc-with-lse/_index.md index eea49bbb39..2a1bfbced6 100644 --- a/content/learning-paths/servers-and-cloud-computing/glibc-with-lse/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/glibc-with-lse/_index.md @@ -23,9 +23,7 @@ subjects: Performance and Architecture armips: - Neoverse -layout: learningpathall -learning_path_main_page: 'yes' operatingsystems: - Linux diff --git a/content/learning-paths/servers-and-cloud-computing/memcached/_index.md b/content/learning-paths/servers-and-cloud-computing/memcached/_index.md index 21e29d41fd..a05e80842a 100644 --- a/content/learning-paths/servers-and-cloud-computing/memcached/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/memcached/_index.md @@ -47,5 +47,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md b/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md index 8911ab48ed..562fdee0f2 100644 --- a/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md @@ -51,5 +51,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
-layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md b/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md index d10a5d34f5..a08eb203d8 100644 --- a/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md @@ -54,5 +54,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/_index.md b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/_index.md index 3c97d84ac8..17dc3e75f9 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/_index.md @@ -1,9 +1,7 @@ --- -title: Deploy Node.js on Google Cloud C4A (Arm-based Axion VMs) +title: Deploy Node.js on Google Cloud C4A Arm-based Axion VMs + -draft: true -cascade: - draft: true minutes_to_complete: 30 @@ -11,9 +9,9 @@ who_is_this_for: This is an introductory topic for software developers migrating learning_objectives: - - Provision an Arm-based SUSE SLES virtual machine on Google Cloud (C4A with Axion processors) + - Provision an Arm-based SUSE Linux Enterprise Server virtual machine on Google Cloud C4A instances with Axion processors - Install and configure Node.js on a SUSE Arm64 (C4A) instance - - Validate Node.js functionality with baseline HTTP server tests + - Validate Node.js functionality with baseline HTTP server tests - Benchmark Node.js performance using Autocannon on Arm64 (AArch64) architecture diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/background.md 
b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/background.md index 715c359130..42fe0869be 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/background.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/background.md @@ -8,16 +8,14 @@ layout: "learningpathall" ## Google Axion C4A Arm instances in Google Cloud -Google Axion C4A is a family of Arm-based virtual machines built on Google’s custom Axion CPU, which is based on Arm Neoverse-V2 cores. Designed for high-performance and energy-efficient computing, these virtual machines offer strong performance for modern cloud workloads such as CI/CD pipelines, microservices, media processing, and general-purpose applications. +Google Axion C4A is a family of Arm-based virtual machines powered by Google’s custom Axion CPU, built on Arm Neoverse-V2 cores. These instances deliver high performance and energy efficiency for modern cloud workloads, including CI/CD pipelines, microservices, media processing, and general-purpose applications. The C4A series provides a cost-effective alternative to x86 virtual machines while leveraging the scalability and performance benefits of the Arm architecture in Google Cloud. -To learn more about Google Axion, refer to the [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu) blog. +For more information on Google Axion, see the Google blog [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu). ## Node.js -Node.js is an open-source, cross-platform JavaScript runtime environment built on Chrome's V8 engine. +Node.js is an open-source, cross-platform JavaScript runtime environment built on Chrome's V8 engine. It enables you to build scalable server-side applications, APIs, and backend services using JavaScript. 
Node.js features an event-driven, non-blocking I/O model, making it highly efficient for handling concurrent connections. Node.js is widely used for web servers, real-time applications, microservices, and cloud-native backend services. -It allows developers to build scalable server-side applications, APIs, and backend services using JavaScript. Node.js features an event-driven, non-blocking I/O model, making it highly efficient for handling concurrent connections. - -Node.js is widely used for web servers, real-time applications, microservices, and cloud-native backend services. Learn more from the [Node.js official website](https://nodejs.org/en) and its [official documentation](https://nodejs.org/docs/latest/api/). +For more information on Node.js, see the [Node.js website](https://nodejs.org/en) and the [Node.js documentation](https://nodejs.org/docs/latest/api/). diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/baseline.md b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/baseline.md index ea830061ff..ca29d31e3d 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/baseline.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/baseline.md @@ -1,39 +1,41 @@ --- -title: Node.js baseline testing on Google Axion C4A Arm Virtual machine +title: Validate Node.js baseline on Google Axion C4A Arm virtual machine weight: 5 ### FIXED, DO NOT MODIFY layout: learningpathall --- +## Validate Node.js installation with a baseline test -Since Node.js has been successfully installed on your GCP C4A Arm virtual machine, please follow these steps to make sure that it is running. +Confirm that your Node.js installation works as expected before benchmarking performance on your Arm-based VM. Run these baseline tests to verify that Node.js is installed correctly and can execute JavaScript code and serve HTTP requests. Catch setup issues early and ensure your environment is ready for further testing. 
-## Validate Node.js installation with a baseline test +## Run a simple REPL test -### 1. Run a Simple REPL Test -The Node.js REPL (Read-Eval-Print Loop) allows you to run JavaScript commands interactively. +Start the Node.js REPL (Read-Eval-Print Loop) to run JavaScript commands interactively: ```console node ``` -Inside the REPL, type: + +Type the following command inside the REPL: ```console console.log("Hello from Node.js"); ``` -You should see an output similar to: + +The output is similar to: ```output Hello from Node.js undefined ``` -This confirms that Node.js can execute JavaScript commands successfully. Please now press "Ctrl-D" to exit node. -### 2. Test a Basic HTTP Server -You can now create a small HTTP server to validate that Node.js can handle web requests. +This confirms that Node.js can execute JavaScript commands successfully. Press "Ctrl-D" to exit node. + +## Test a basic HTTP server -Use a text editor to create a file named `app.js` with the code below: +Create a file named `app.js` with the following code to validate that Node.js can handle web requests: ```javascript const http = require('http'); @@ -47,34 +49,39 @@ server.listen(80, '0.0.0.0', () => { console.log('Server running at http://0.0.0.0:80/'); }); ``` - - This server listens on port 80. - - Binding to 0.0.0.0 allows connections from any IP, not just localhost. -Next, we run the HTTP server in the background via sudo: +The server listens for incoming connections on port 80, which is the default port for HTTP traffic. By binding to the IP address 0.0.0.0, the server accepts connections from any network interface, not just from localhost. This configuration enables access from other devices on the network. 
+ +Run the HTTP server in the background using sudo: ```console export MY_NODE=`which node` sudo ${MY_NODE} app.js & ``` -You should see an output similar to: + +The expected output is: ```output Server running at http://0.0.0.0:80/ ``` -#### Test Locally with Curl + +## Test locally with curl + +Run the following command to test the server locally: ```console curl http://localhost:80 ``` -You should see an output similar to: +The expected output is: ```output Baseline test successful! ``` -#### Test from a Browser -Also, you can access it from the browser with your VM's public IP. Run the following command to print your VM’s public URL, then open it in a browser: +## Test from a browser + +Print your VM’s public URL and open it in a browser: ```console echo "http://$(curl -s ifconfig.me):80/" @@ -82,6 +89,6 @@ echo "http://$(curl -s ifconfig.me):80/" You should see the following message in your browser, confirming that your Node.js HTTP server is running successfully: -![Node.js Browser alt-text#center](images/node-browser.png) +![Screenshot showing the browser displaying 'Baseline test successful!' from the Node.js HTTP server running on a Google Axion C4A Arm VM. alt-text#center](images/node-browser.png "Browser displaying baseline test successful dialogue message") -This verifies the basic functionality of the Node.js installation before proceeding to the benchmarking. +You have now validated that Node.js is working correctly on your Arm VM. Proceed to benchmarking and performance testing. 
diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/benchmarking.md b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/benchmarking.md index e0dbfc7d8b..0af212f6c1 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/benchmarking.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/benchmarking.md @@ -1,44 +1,53 @@ --- -title: Node.js Benchmarking +title: Benchmark Node.js performance with Autocannon weight: 6 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Node.js Benchmarking by Autocannon +## Benchmark Node.js with Autocannon -After validating that Node.js is installed and your HTTP server is running, you can benchmark it using **Autocannon**. +After validating that Node.js is installed and your HTTP server is running, you can benchmark it using Autocannon. You'll use Autocannon to run a series of tests, analyze the results, and identify areas for optimization. Benchmarking on Arm64 provides valuable insights into how Node.js applications scale and perform in cloud environments, helping you make informed decisions about deployment and resource allocation. -### Install Autocannon -**Autocannon** is a fast HTTP/1.1 benchmarking tool for Node.js, used to measure server throughput, latency, and request handling under concurrent load. +## Install Autocannon + +Autocannon is a fast HTTP/1.1 benchmarking tool for Node.js, used to measure server throughput, latency, and request handling under concurrent load. 
To install Autocannon, run this command: ```console npm install -g autocannon ``` -### Start Your Node.js HTTP Server +## Start the Node.js HTTP server -If your sample HTTP server is not already running from the last section, you can start it by typing: +If your sample HTTP server isn't running from the last section, start it by using this command: ```console export MY_NODE=`which node` sudo ${MY_NODE} app.js & ``` -Server should be listening on port 80 in the background: +The server should be listening on port 80 in the background: ```output Server running at http://0.0.0.0:80/ ``` -### Run a Basic Benchmark (Local) +## Run a local Node.js benchmark with Autocannon + +Now run a local Node.js benchmark with Autocannon: ```console autocannon -c 100 -d 10 http://localhost:80 ``` -- `-c 100` → 100 concurrent connections -- `-d 10` → duration 10 seconds -- URL → endpoint to test +{{% notice Tip %}} +These options specify how the benchmarking tool runs the test: + +- The `-c 100` flag sets the number of concurrent connections to one hundred, simulating multiple users accessing the endpoint at the same time. +- The `-d 10` flag sets the test duration to ten seconds, so the tool sends requests for that period. +- The URL is the endpoint you're measuring, which could be a web service or API running on your Arm server. + +This configuration helps you evaluate how your application performs under load on Arm platforms. +{{% /notice %}} You should see an output similar to: ```output @@ -65,48 +74,35 @@ Req/Bytes counts sampled once per second. 707k requests in 10.02s, 137 MB read ``` -### Understanding Node.js benchmark metrics and results with Autocannon - -- **Avg (Average Latency)** → The mean time it took for requests to get a response. -- **Stdev (Standard Deviation)** → How much individual request times vary around the average. Smaller numbers mean more consistent response times. -- **Min (Minimum Latency)** → The fastest request observed during the test. 
- -### Benchmark summary on x86_64 -To compare the benchmark results, the following results were collected by running the same benchmark on a `x86 - c4-standard-4` (4 vCPUs, 15 GB Memory) x86_64 VM in GCP, running SUSE: - -Latency (ms): +## Interpret the Autocannon benchmark metrics -| Metric | 2.5% | 50% (Median) | 97.5% | 99% | Avg | Stdev | Max | -|----------|------|--------------|-------|-----|--------|--------|-------| -| Latency | 0 | 1 | 2 | 2 | 0.73 | 0.87 | 104 | +Now have a look at the Autocannon benchmark metrics to get a sense of how Node.js performed. Here is an explanation of the metrics and what they mean: -Throughput: +- The average latency (Avg) shows the mean time it takes for each request to receive a response from the server. +- Standard deviation (Stdev) indicates how much the response times vary around the average; lower values mean the server responds more consistently. +- The minimum latency (Min) represents the fastest response recorded during the benchmark, highlighting the best-case performance for individual requests. 
-| Metric | 1% | 2.5% | 50% | 97.5% | Avg | Stdev | Min | -|------------|--------|--------|---------|---------|----------|-----------|---------| -| Req/Sec | 70,143 | 70,143 | 84,479 | 93,887 | 84,128 | 7,547.18 | 70,095 | -| Bytes/Sec | 13.6 MB| 13.6 MB| 16.4 MB | 18.2 MB | 16.3 MB | 1.47 MB | 13.6 MB| +## Review Node.js benchmark results on Arm64 -### Benchmark summary on Arm64 -Results from the earlier run on the `c4a-standard-4` (4 vCPU, 16 GB memory) Arm64 VM in GCP (SUSE): +Here are the results from the earlier run on the `c4a-standard-4` (4 vCPU, 16 GB memory) Arm64 VM in GCP (SUSE): -Latency (ms): +### Latency results (ms): | Metric | 2.5% | 50% (Median) | 97.5% | 99% | Avg | Stdev | Max | |----------|------|--------------|-------|-----|------|-------|------| | Latency | 1 | 1 | 3 | 3 | 1.2 | 0.62 | 24 | -Throughput: +### Throughput results: | Metric | 1% | 2.5% | 50% | 97.5% | Avg | Stdev | Min | |------------|--------|--------|---------|---------|----------|----------|---------| | Req/Sec | 45,279 | 45,279 | 54,719 | 55,199 | 53,798.4 | 2,863.96 | 45,257 | | Bytes/Sec | 8.78 MB| 8.78 MB| 10.6 MB | 10.7 MB | 10.4 MB | 557 kB | 8.78 MB | -### Node.js performance benchmarking comparison on Arm64 and x86_64 -When you compare the benchmarking results, you will notice that on the Google Axion C4A Arm-based instances: +## Evaluate Node.js performance on Arm64 -- Average latency is very low (~1.2 ms) with consistent response times. -- Maximum latency spikes are rare, reaching up to 24 ms. -- The server handles high throughput, averaging ~53,798 requests/sec. -- Data transfer rate averages 10.4 MB/sec, demonstrating efficient performance under load. +Now that you have the benchmarking results, you can see how Node.js performs on Arm64: +- The average latency is low, around 1.2 ms, which means your server responds quickly to requests. +- Response times are consistent, with only occasional spikes up to 24 ms. 
+- The server processes a high volume of traffic, averaging about 53,800 requests per second. +- Data transfer rates are efficient, averaging 10.4 MB per second during the benchmark. diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/installation.md b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/installation.md index 344ae9471c..5f9dbe67ff 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/installation.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/installation.md @@ -1,22 +1,30 @@ --- -title: Install Node.js Using Node Version Manager +title: Install Node.js using Node Version Manager weight: 4 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Install Node.js with Node Version Manager (NVM) -This guide walks you through installing **NodeJS** via the Node Version Manager (NVM). NVM is a powerful tool that allows users to specify which version of **NodeJS** that they want to use. NVM will then download and install the requested vesion using the **NodeJS** official packages. +## Install Node Version Manager (NVM) +To install Node.js on your Arm-based VM, use Node Version Manager (NVM). NVM lets you select and manage different Node.js versions easily. By using official Node.js packages, you'll get a reliable and straightforward setup. -### 1. Install Node Version Manager (NVM) -First, we will run this command to download and install NVM into our VM instance: + +First, use this command to download and install NVM into your VM instance: ```console curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash ``` -Next, we have to activate NVM in our terminal shell. We can manually activate our current shell via copy and paste of the following into the shell: +Next, activate Node Version Manager (NVM) in your current terminal session. 
Copy and paste the following commands into your shell to load NVM and enable command completion: + +```console +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" +[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" +``` + +This step ensures that NVM commands are available in your shell. If you open a new terminal, repeat these commands or add them to your `~/.bashrc` file for automatic activation: ```console export NVM_DIR="$HOME/.nvm" @@ -24,30 +32,28 @@ export NVM_DIR="$HOME/.nvm" [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion ``` -You should be able to confirm that NVM is available by typing: +Confirm that NVM is available by typing: ```console nvm --version ``` -### 2. Install NodeJS -Now that NVM is installed, we simply type the following commands in our shell to download and install **NodeJS**: +## Install Node.js +Now that NVM is installed, download and install Node.js: ```console nvm install v24 nvm use v24 ``` -Additionally, we can add this command to the bottom of our $HOME/.bashrc file: +Next, add this command to the bottom of your $HOME/.bashrc file: ```console echo 'nvm use v24' >> ~/.bashrc ``` -### 3. Verify Installation -Check that Node.js and npm (Node’s package manager) are installed correctly. - -You should be able to confirm that **NodeJS** is now installed and available! +## Verify installation +Check that Node.js and npm (Node.js package manager) are installed correctly by using this command that confirms that **NodeJS** is installed and available: ```console node --version @@ -60,4 +66,10 @@ v24.10.0 11.6.1 ``` -Node.js installation is complete. You can now proceed with the baseline testing. +This shows you that Node.js installation is complete. You can now proceed with the baseline testing. + +## What you've accomplished + +You've successfully provisioned a Google Axion C4A Arm virtual machine running SUSE Linux Enterprise Server. 
You're now ready to validate your Node.js installation and deploy your workloads on Arm. + + diff --git a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/instance.md b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/instance.md index fc33e92cfe..b9b859c089 100644 --- a/content/learning-paths/servers-and-cloud-computing/node-js-gcp/instance.md +++ b/content/learning-paths/servers-and-cloud-computing/node-js-gcp/instance.md @@ -27,7 +27,10 @@ To create a virtual machine based on the C4A instance type: ![Create a Google Axion C4A Arm virtual machine in the Google Cloud Console with c4a-standard-4 selected alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console") -- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server**. Select "Pay As You Go" for the license type. Click **Select**. +- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server** or **Ubuntu**. + - If using **SUSE Linux Enterprise Server**, select "Pay As You Go" for the license type. + - If using **Ubuntu**, under the **Version** tab, scroll down and select the aarch64 version of **Ubuntu 22.04 LTS**. +- After making your selection, click **Select**. - Under **Networking**, enable **Allow HTTP traffic**. - Click **Create** to launch the instance. - Once created, you should see a "SSH" option to the right in your list of VM instances.
Click on this to launch an SSH shell into your VM instance: @@ -37,3 +40,58 @@ To create a virtual machine based on the C4A instance type: - A window from your browser should come up and you should now see a shell into your VM instance: ![Terminal Shell in your VM instance alt-text#center](images/gcp-shell.png "Terminal shell in your VM instance") + +## Explore your instance + +### Run uname + +Use the [uname](https://en.wikipedia.org/wiki/Uname) utility to verify that you are using an Arm-based server. For example: + +```console +uname -m +``` +will identify the host machine as `aarch64`. + +### Run hello world + +Install the `gcc` compiler: + +{{< tabpane code=true >}} + {{< tab header="Ubuntu" language="bash">}} +sudo apt update +sudo apt install -y build-essential + {{< /tab >}} + {{< tab header="SUSE Linux" language="bash">}} +sudo zypper refresh +sudo zypper install -y gcc + {{< /tab >}} +{{< /tabpane >}} + +Using a text editor of your choice, create a file named `hello.c` with the contents below: + +```C +#include <stdio.h> +int main(){ + printf("hello world\n"); + return 0; +} +``` +Build and run the application: + +```console +gcc hello.c -o hello +./hello +``` + +The output is shown below: + +```output +hello world +``` + +## Automating Arm Based Infrastructure Deployment + +Cloud infrastructure deployment is typically done via Infrastructure as code (IaC) automation tools. There are Cloud Service Provider specific tools like [Google Cloud Deployment Manager](https://cloud.google.com/deployment-manager/docs/). + +There are also Cloud Service Provider agnostic tools like [Terraform](https://www.terraform.io/). There is a [deploying Arm VMs on (GCP) using Terraform learning path](/learning-paths/servers-and-cloud-computing/gcp) that should be reviewed next.
+ diff --git a/content/learning-paths/servers-and-cloud-computing/onnx/_demo.md b/content/learning-paths/servers-and-cloud-computing/onnx/_demo.md index 1c9a1872e9..1fcfe43042 100644 --- a/content/learning-paths/servers-and-cloud-computing/onnx/_demo.md +++ b/content/learning-paths/servers-and-cloud-computing/onnx/_demo.md @@ -29,8 +29,6 @@ rag_data_cutoff_date: 2025/01/17 title_chatbot_area: Phi-4-mini Chatbot Demo -prismjs: true - ### Specific details to this demo @@ -51,7 +49,6 @@ tps_ranges: ### FIXED, DO NOT MODIFY # ================================================================================ demo_template_name: phi_onnx_chatbot_demo # allows the 'demo.html' partial to route to the correct Configuration and Demo/Stats sub partials for page render. -weight: 2 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. --- diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/_index.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/_index.md index f421de80f2..fd77bac1f6 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/_index.md @@ -1,19 +1,15 @@ --- -title: Deploy PHP on Google Cloud C4A (Arm-based Axion VMs) +title: Deploy PHP on Google Cloud C4A Arm-based Axion VMs -draft: true -cascade: - draft: true - + minutes_to_complete: 30 -who_is_this_for: This is an introductory topic for software developers migrating PHP workloads from x86_64 to Arm-based servers, specifically on Google Cloud C4A virtual machines built on Axion processors. 
- +who_is_this_for: This is an introductory topic for developers migrating Hypertext Preprocessor (PHP) workloads from x86_64 to Arm-based servers, specifically on Google Cloud C4A virtual machines (VM) built on Axion processors. learning_objectives: - - Provision a SUSE SLES virtual machine on Google Cloud C4A (Arm-based Axion VM) - - Install PHP on a SUSE Arm64 (C4A) instance - - Validate PHP functionality with baseline HTTP server tests + - Provision a SUSE Linux Enterprise Server (SLES) virtual machine on a Google Cloud C4A Arm-based Axion virtual machine + - Install PHP on a SUSE Arm64 C4A instance + - Validate PHP functionality by running baseline HTTP server tests - Benchmark PHP performance using PHPBench on Arm64 architecture @@ -32,7 +28,7 @@ armips: tools_software_languages: - PHP - - apache + - Apache - PHPBench operatingsystems: diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/background.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/background.md index 8bbb374d12..b00b9008d6 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/background.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/background.md @@ -1,23 +1,22 @@ --- -title: Get started with PHP on Google Axion C4A (Arm Neoverse V2) +title: Get started with PHP on Google Cloud Axion C4A Arm VMs weight: 2 layout: "learningpathall" --- -## Google Axion C4A Arm instances in Google Cloud +## PHP -Google Axion C4A is a family of Arm-based virtual machines built on Google’s custom Axion CPU, which is based on Arm Neoverse V2 cores. Designed for high-performance and energy-efficient computing, these virtual machines offer strong performance for modern cloud workloads such as CI/CD pipelines, microservices, media processing, and general-purpose applications. +PHP (Hypertext Preprocessor) is an open-source, server-side scripting language designed for web development. 
You can embed PHP directly into HTML, making it easy to generate content dynamically on the server before sending it to the browser. PHP is widely used for websites, web applications, content management systems (CMS), and APIs. For more information, see the [PHP website](https://www.php.net/) and the [PHP documentation](https://www.php.net/docs.php). -The C4A series provides a cost-effective alternative to x86 virtual machines while leveraging the scalability and performance benefits of the Arm architecture in Google Cloud. -To learn more about Google Axion, refer to the [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu) blog. +## Google Axion C4A -## PHP +Google Axion C4A is a family of Arm-based virtual machines powered by Google's Axion CPU, built with Arm Neoverse V2 cores. They deliver high performance and energy efficiency for cloud workloads such as CI/CD pipelines, microservices, media processing, and general-purpose applications. The C4A series provides a cost-effective alternative to x86 virtual machines while leveraging the scalability and performance benefits of the Arm architecture in Google Cloud. -PHP (Hypertext Preprocessor) is an open-source, server-side scripting language designed for web development. +For more information on Google Axion, see the Google blog [Introducing Google Axion Processors, our new Arm-based CPUs](https://cloud.google.com/blog/products/compute/introducing-googles-new-arm-based-cpu). -It allows developers to create dynamic web pages, interact with databases, handle forms, and build web applications. PHP can be embedded directly into HTML, making it easy to generate content dynamically on the server before sending it to the browser. +## What's next? -PHP is widely used for websites, web applications, content management systems (CMS), and APIs. 
Learn more from the [PHP official website](https://www.php.net/) and its [official documentation](https://www.php.net/docs.php). +You’re now ready to begin deploying PHP workloads on Arm-based Google Cloud Axion C4A VMs. This Learning Path shows you how to optimize PHP for Arm servers, benchmark performance, and migrate from x86 to Arm with confidence. diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/baseline.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/baseline.md index e72b143540..781a185e51 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/baseline.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/baseline.md @@ -1,5 +1,5 @@ --- -title: PHP baseline testing on Google Axion C4A Arm Virtual Machine +title: Validate PHP baseline on Google Cloud Axion C4A Arm VM weight: 5 ### FIXED, DO NOT MODIFY @@ -7,17 +7,16 @@ layout: learningpathall --- -## Baseline Setup for PHP-FPM -This section guides you through configuring PHP-FPM (FastCGI Process Manager) on a SUSE Arm-based Google Cloud C4A virtual machine. You will prepare the PHP-FPM pool configuration, verify PHP's FastCGI setup, and later connect it to Apache to confirm end-to-end functionality. +## Configure PHP-FPM baseline +In this section, you’ll configure PHP-FPM (FastCGI Process Manager) on a SUSE Linux Arm-based Google Cloud Axion C4A virtual machine. You’ll set up the PHP-FPM pool and verify the FastCGI configuration. Next, you’ll connect PHP-FPM to Apache. These steps confirm that your Arm server is ready for dynamic PHP workloads. -### Configure the PHP-FPM Pool +## Set up the PHP-FPM pool -PHP-FPM (FastCGI Process Manager) runs PHP scripts in dedicated worker processes that are independent of the web server. -This design improves performance, security, and fault isolation — especially useful on multi-core Arm-based processors like Google Cloud’s Axion C4A VMs. 
+PHP-FPM (FastCGI Process Manager) runs PHP scripts in dedicated worker processes, independent of the web server. You’ll notice better performance, stronger security, and improved fault isolation when you use multi-core Arm-based processors like Google Cloud Axion C4A VMs. -A pool defines a group of PHP worker processes, each serving incoming FastCGI requests. Different applications or virtual hosts can use separate pools for better resource control. +A pool is a group of PHP worker processes that serve FastCGI requests. You can use separate pools for different applications or virtual hosts to control resources more effectively. -### Copy the Default Configuration (if missing) +## Copy the default configuration If your PHP-FPM configuration files don't exist yet (for example, after a minimal installation in this Learning Path), copy the defaults into place using the commands below: @@ -25,11 +24,11 @@ If your PHP-FPM configuration files don't exist yet (for example, after a minima sudo cp /etc/php8/fpm/php-fpm.d/www.conf.default /etc/php8/fpm/php-fpm.d/www.conf sudo cp /etc/php8/fpm/php-fpm.conf.default /etc/php8/fpm/php-fpm.conf ``` -These commands: -Create a default pool configuration (www.conf) that controls how PHP-FPM spawns and manages worker processes. -Restore the main FPM service configuration (php-fpm.conf) if it's missing. 
+These commands do the following: +- Create a default pool configuration (`www.conf`) to control how PHP-FPM spawns and manages worker processes +- Restore the main FPM service configuration (`php-fpm.conf`) if it’s missing -### Edit the Configuration +## Edit the PHP-FPM configuration Open the PHP-FPM pool configuration file in a text editor: @@ -51,7 +50,7 @@ listen.group = www listen.mode = 0660 ``` -Explanation of each directive: +This table gives you an explanation of each directive: | Directive | Description | | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | listen = /run/php-fpm/[www.sock](http://www.sock) | Configures PHP-FPM to communicate with Apache using a local Unix socket instead of a TCP port (`127.0.0.1:9000`). This reduces network overhead and improves performance. | @@ -60,7 +59,7 @@ Explanation of each directive: | listen.mode = 0660 | Defines file permissions so that both the owner (`wwwrun`) and group (`www`) can read and write to the socket. This enables smooth communication between Apache and PHP-FPM. | -### Start and Enable PHP-FPM +## Start and enable PHP-FPM service After updating the configuration, restart the PHP-FPM service so it picks up the new settings: @@ -91,11 +90,11 @@ You should see output similar to: Oct 16 13:56:44 pareena-php-test systemd[1]: Starting The PHP FastCGI Process Manager... Oct 16 13:56:44 pareena-php-test systemd[1]: Started The PHP FastCGI Process Manager. ``` -PHP-FPM is now active and ready to process requests via its Unix socket (/run/php-fpm/www.sock). -Next, you will configure Apache to communicate with PHP-FPM, allowing your server to process and serve dynamic PHP pages. +PHP-FPM is now active and ready to process requests using its Unix socket (`/run/php-fpm/www.sock`). 
+Next, you’ll configure Apache to communicate with PHP-FPM, allowing your Arm server to process and serve dynamic PHP pages. ## Install the Apache PHP8 module -If you prefer to have Apache handle PHP execution directly (instead of using PHP-FPM), you can install the Apache PHP 8 module, which integrates PHP into Apache using the `mod_php` interface: +If you want Apache to handle PHP execution directly (instead of using PHP-FPM), install the Apache PHP 8 module, which integrates PHP into Apache using the `mod_php` interface: ```console sudo zypper install apache2-mod_php8 @@ -105,28 +104,26 @@ Once the module is installed, restart Apache to load the new configuration: ```console sudo systemctl restart apache2 ``` -Next, you will test PHP execution by creating a simple PHP page and verifying that Apache can correctly render dynamic content. +Next, you’ll test PHP execution by creating a simple PHP page and verifying that Apache can correctly render dynamic content. -## Test PHP -Now that PHP and Apache are installed, you can verify that everything is working correctly. +## Test PHP functionality +Now that PHP and Apache are installed, you can verify that everything is working correctly on your Arm-based VM. -### Create a Test Page +## Create a PHP test page Create a simple PHP file that displays detailed information about your PHP installation: ```console echo "<?php phpinfo(); ?>" | sudo tee /srv/www/htdocs/info.php ``` -This creates a file named `info.php` inside Apache's web root directory `(/srv/www/htdocs/)`. When you open this file in a browser, it will display the PHP configuration page. +This creates a file named `info.php` inside Apache’s web root directory (`/srv/www/htdocs/`). When you open this file in a browser, it displays the PHP configuration page.
-### Test from Inside the VM +## Verify PHP from inside the VM You can verify that PHP and Apache are communicating correctly by testing the web server locally using curl: ```console curl http://localhost/info.php ``` -- `curl` fetches the page from the local Apache server. -- If PHP is working, you will see a large block of HTML code as output. This is the rendered output of the phpinfo() function. -- This confirms that Apache successfully passed the request to the PHP interpreter and returned the generated HTML response. +When you run the `curl` command, it fetches the PHP test page from the local Apache server. If PHP is configured correctly, you'll see a large block of HTML code in the output, which is the result of the `phpinfo()` function. This confirms that Apache is successfully passing requests to the PHP interpreter and returning the generated HTML response. You should see output similar to: @@ -148,20 +145,25 @@ h1 {font-size: 150%;} h2 {font-size: 125%;} h2 a:link, h2 a:visited{color: inherit; background: inherit;} ``` -This long HTML output represents the PHP configuration page content. +This long HTML output represents the PHP configuration page content. -### Test from Your Browser -Now, let's verify that your PHP setup works correctly from outside the VM. +## Verify PHP from your browser +Now, verify that your PHP setup works correctly from outside the VM. Open a web browser on your local machine (such as Chrome, Firefox, or Edge) and enter the following URL in the address bar: ```console http://<your-vm-ip>/info.php ``` -- Replace `` with the public IP of your Google Cloud Axion VM. +Replace `<your-vm-ip>` with the public IP of your Google Cloud Axion C4A Arm VM. -If everything is set up correctly, you will see a PHP Info page in your browser. It looks like this: +If your configuration is correct, your browser displays the PHP Info page.
This page shows detailed information about your PHP environment, including version, build options, and loaded modules, as shown below: -![PHP-info page alt-text#center](images/php-web.png "Figure 1: PHP info") +![Screenshot of PHP info page in browser showing PHP configuration details on Google Cloud Axion C4A Arm VM. alt-text#center](images/php-web.png "PHP info page in browser on Google Cloud Axion C4A Arm VM") -Successfully loading the PHP Info page in your browser confirms that your PHP and Apache environment on Google Cloud C4A is configured and functioning properly. -You are now ready to proceed to the benchmarking and performance testing phase. +Successfully loading the PHP Info page in your browser confirms that your PHP and Apache environment on Google Cloud Axion C4A Arm is configured and functioning properly. + +## What's next? + +You’ve validated your PHP baseline setup on an Arm-based Google Cloud VM. You’re ready to move on to benchmarking and performance testing for your PHP workloads on Arm. + +Next, you’ll learn how to measure PHP performance on Google Cloud Axion C4A Arm VMs. You’ll use benchmarking tools to evaluate throughput, latency, and resource utilization. This helps you optimize your PHP environment for production workloads and identify areas for improvement. diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/benchmarking.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/benchmarking.md index e9a72f8111..3dfb1d1eb5 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/benchmarking.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/benchmarking.md @@ -6,19 +6,18 @@ weight: 6 layout: learningpathall --- +## PHP benchmarking using PHPBench -## PHP Benchmarking using PHPBench +In this section, you will learn how to benchmark PHP performance using PHPBench, a modern and extensible benchmarking framework for PHP applications. 
You will install PHPBench, run sample tests, and interpret key metrics such as mode time, variance, and throughput. You will then analyze the results to understand how your Google Cloud C4A (Axion Arm64) virtual machine performs on common operations like string manipulation and array processing. PHPBench is a flexible micro-benchmarking tool designed to measure PHP code performance precisely and repeatably. -In this section, you will learn how to benchmark PHP performance using PHPBench, a modern and extensible benchmarking framework for PHP applications. You will install PHPBench, run sample tests, and interpret key metrics such as mode time, variance, and throughput. -You will then analyze the results to understand how your Google Cloud C4A (Axion Arm64) virtual machine performs on common operations like string manipulation and array processing. +With PHPBench, you can: + * Measure the execution time of PHP functions or code blocks + * Identify performance regressions between versions + * Automate performance testing across CI/CD pipelines + * Track results over time to detect optimizations or slowdowns -PHPBench is a flexible micro-benchmarking tool designed to measure PHP code performance precisely and repeatably. It allows developers to: - * Measure the execution time of PHP functions or code blocks. - * Identify performance regressions between versions. - * Automate performance testing across CI/CD pipelines. - * Track results over time to detect optimizations or slowdowns. - -### Download Composer Installer +## Download Composer installer Before installing PHPBench, you need Composer, which is PHP's dependency manager. Composer handles library installations, versioning, and autoloading, ensuring tools like PHPBench run consistently across environments.
@@ -29,7 +28,7 @@ sudo php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" This command downloads a PHP script called `composer-setup.php`, which will install Composer. -### Install the phar extension +## Install the phar extension Composer requires PHP's phar (PHP Archive) extension to run. This extension allows PHP to execute .phar archive files, self-contained PHP applications like Composer and PHPBench are distributed in this format. @@ -38,7 +37,7 @@ Install the extension with: sudo zypper install -y php8-phar ``` -### Install Composer system-wide +## Install Composer system-wide Now, install Composer globally so it is available for all users and can be executed from any directory: @@ -56,7 +55,7 @@ Use it: php /usr/local/bin/composer ``` Composer is now installed system-wide at /usr/local/bin/composer and ready to manage PHP dependencies. -### Remove the installer script +## Remove the installer script After successfully installing Composer, remove the installer file to keep your environment clean: @@ -65,7 +64,7 @@ sudo php -r "unlink('composer-setup.php');" ``` Since Composer is now installed system-wide, the installer file is no longer needed. -### Verify Composer installation +## Verify Composer installation To confirm that Composer was installed correctly and is accessible globally, run: ```console @@ -80,7 +79,7 @@ Run the "diagnose" command to get more detailed diagnostics output. Composer is now successfully installed and you can proceed to installing PHPBench. -### Install PHPBench globally +## Install PHPBench globally PHPBench is a powerful benchmarking tool for measuring the performance of PHP code. 
Install it globally using Composer so you can access it from any directory: @@ -90,16 +89,16 @@ composer global require phpbench/phpbench This installs `phpbench` in your user's global Composer directory, typically located under `$HOME/.config/composer/` -### Add Composer global bin to PATH +## Add Composer global bin to PATH -To make `phpbench` accessible from any terminal session, add Composer's global binary path to your system's environment PATH: +To make `phpbench` accessible from any terminal session, add Composer's global binary path to your system's environment `PATH`: ```console export PATH="$HOME/.config/composer/vendor/bin:$PATH" echo 'export PATH=$HOME/.config/composer/vendor/bin:$PATH' >> ~/.bashrc source ~/.bashrc ``` -### Verify PHPBench installation +## Verify PHPBench installation Once installed, verify that PHPBench is working correctly: ```console phpbench --version @@ -111,7 +110,7 @@ phpbench 1.2.14 ``` PHPBench is now installed and ready to run. -### Create a Benchmark Directory +## Create a benchmark directory Create a new PHP benchmark file using your preferred text editor: @@ -119,7 +118,7 @@ Create a new PHP benchmark file using your preferred text editor: mkdir ~/phpbench-tests cd ~/phpbench-tests ``` -### Create a Benchmark Script +## Create a benchmark script Create a new PHP benchmark file using your preferred editor: @@ -159,11 +158,9 @@ class ExampleBenchmark ``` This sets up two basic benchmark tests, string concatenation and array push. -- @Revs(1000): Each benchmark repeats 1000 times per iteration. -- @Iterations(5): The benchmark runs 5 separate iterations and averages results. -- `benchStringConcat` and `benchArrayPush`: Sample benchmarks for string and array operations. +The `@Revs(1000)` annotation ensures each benchmark function is executed 1,000 times per iteration, while `@Iterations(5)` means the benchmark runs five separate iterations and averages the results for greater accuracy. 
The sample benchmarks, `benchStringConcat` and `benchArrayPush`, measure the performance of string concatenation and array operations, respectively. -### Run the Benchmarks +## Run the benchmarks Execute the benchmark suite you created using the `phpbench run` command: @@ -184,23 +181,22 @@ with PHP version 8.0.30, xdebug ❌, opcache ❌ Subjects: 2, Assertions: 0, Failures: 0, Errors: 0 ``` -### Understanding PHP benchmark metrics and results with PHPBench +## Understanding PHPBench metrics and results + +PHPBench reports several key metrics to help you understand your code's performance, as defined in the table below: -- **benchStringConcat** → Name of the benchmark function; in this case, it measures string concatenation performance. -- **benchArrayPush** → Name of another benchmark function; here, measuring array push performance. -- **xdebug ❌** → Xdebug extension is disabled, which is good because Xdebug slows down execution. -- **opcache ❌** → Opcache is disabled, so you’re measuring raw execution without caching optimizations. -- **I4** → Number of iterations per measurement. - - `I4` means 4 iterations of this benchmark were executed for one measurement. -- **Mo13.438μs** → Mode (or mean) execution time for the benchmark. - - `Mo` = Mode, showing the most common measured execution time across iterations. - - `13.438 μs` = 13.438 microseconds per iteration. -- **(±0.82%)** → Variation or coefficient of variation in the measurements. - - Shows consistency of results. - - Lower percentage → more stable and reliable benchmark. +| Metric/Term | Meaning | +|---------------------|-----------------------------------------------------------------------------------------------------------| +| **benchStringConcat** | Name of the benchmark function; measures string concatenation performance. | +| **benchArrayPush** | Name of the benchmark function; measures array push performance. 
| +| **xdebug ❌** | Xdebug extension is disabled, which is good because Xdebug slows down execution. | +| **opcache ❌** | Opcache is disabled, so you’re measuring raw execution without caching optimizations. | +| **I4** | Number of iterations per measurement. `I4` means 4 iterations of this benchmark were executed per run. | +| **Mo13.438μs** | Mode (most common) execution time for the benchmark. `13.438 μs` = 13.438 microseconds per iteration. | +| **(±0.82%)** | Variation or coefficient of variation in the measurements. Lower percentage means more stable results. | -### Benchmark summary on Arm64 +## Benchmark summary on Arm64 Results from the benchmark suite executed on a Google Cloud c4a-standard-4 (Arm64) instance with 4 vCPUs and 16 GB memory, running SUSE Linux: | Benchmark Function | Iterations | Mode Execution Time (μs)| Variation (%) | Notes | @@ -209,7 +205,7 @@ Results from the benchmark suite executed on a Google Cloud c4a-standard-4 (Arm6 | benchArrayPush | I4 | 8.487 μs | ±0.51% | Measures performance of pushing elements to an array| -### Benchmark summary on x86_64 +## Benchmark summary on x86_64 For comparison, the same PHPBench test suite was executed on a Google Cloud c4-standard-4 (x86_64) instance with 4 vCPUs and 15 GB memory, running SUSE Linux: | Benchmark Function | Iterations | Mode Execution Time (μs) | Variation (%) | Notes | @@ -217,14 +213,17 @@ For comparison, the same PHPBench test suite was executed on a Google Cloud c4-s | benchStringConcat | I4 | 13.493 | ±1.80% | Measures performance of string concatenation | | benchArrayPush | I4 | 7.395 | ±1.07% | Measures performance of pushing elements to an array | -### PHP performance benchmarking comparison on Arm64 and x86_64 +## PHP performance benchmarking comparison on Arm64 and x86_64 When comparing benchmark results between Google Cloud Axion C4A (Arm64) and x86-based C4 (x86_64) instances, you will see: -The results show that both architectures deliver nearly identical execution 
times for typical PHP operations, with Arm64 showing slightly lower variation (more stable performance). The Arm64 instance performs within ~15% of x86_64, showing strong memory throughput and cache performance for dynamic array allocation workloads. -Low variance (±0.82% / ±0.51: Indicates that the Axion-based Arm cores on C4A provide stable, repeatable performance, ideal for predictable PHP application behavior in production. +- The results show that both architectures deliver nearly identical execution times for typical PHP operations, with Arm64 showing a slightly lower level of variation, which indicates more stable performance. +- The Arm64 instance performs within ~15% of x86_64, showing strong memory throughput and cache performance for dynamic array allocation workloads. +- Low variance (±0.82% / ±0.51%) indicates that the Axion-based Arm cores on C4A provide stable, repeatable performance, ideal for predictable PHP application behavior in production. + +## Summary -These results show that PHP performs consistently across both architectures, and that Google Cloud Axion C4A Arm64 VMs deliver competitive, reliable performance for PHP-based web and backend applications. +These results show that PHP performs consistently across both architectures, and that Google Cloud Axion C4A Arm64 VMs deliver competitive, reliable performance for PHP-based web and backend applications. You can confidently deploy PHP workloads on Arm64 instances, knowing that performance is stable and comparable to x86_64 systems. This benchmarking process helps you identify areas for optimization and ensures your applications run efficiently on Arm platforms. As you continue developing and scaling PHP solutions, regularly benchmarking on Arm64 will help you maintain high performance and take advantage of the cost and energy efficiency benefits offered by Arm-based cloud infrastructure.
diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/installation.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/installation.md index 4f0eb207a2..2bd9b20f28 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/installation.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/installation.md @@ -6,44 +6,44 @@ weight: 4 layout: learningpathall --- -## Install PHP -In this section, you will install PHP together with the Apache web server and several commonly used PHP extensions on a SUSE Arm-based virtual machine. This forms the foundation for running and serving dynamic PHP applications on Arm-based machines. +## Overview +In this section, you’ll install PHP, the Apache web server, and several commonly used PHP extensions on a SUSE Arm-based virtual machine. This setup forms the foundation for running and serving dynamic PHP applications on Arm-based machines. -### Update the system -Before installing any software, make sure your system is up to date with the latest packages and security patches. +## Update the system +Before installing any software, make sure your system has the latest packages and security patches: ```console sudo zypper refresh sudo zypper update -y ``` -### Install PHP, Apache, and common extensions -Now install PHP, PHP-FPM, Apache web server, and some commonly used PHP extensions. +## Install PHP, Apache, and common extensions +Install PHP, PHP-FPM, Apache web server, and some commonly used PHP extensions by running: ```console sudo zypper install -y php php-cli php-fpm php-mysql php-xml php-mbstring php-opcache apache2 ``` -Package breakdown: -| Package | Description | -| ---------------- | ------------------------------------------------------------------------------------------------ | -| **php** | Core PHP interpreter used to run web applications. | -| **php-cli** | Enables running PHP scripts from the command line. Useful for testing and automation. 
| -| **php-fpm** | FastCGI Process Manager — manages PHP worker processes and improves concurrency and performance. | -| **php-mysql** | Provides MySQL/MariaDB database connectivity for PHP. | -| **php-xml** | Adds support for parsing and manipulating XML data. | -| **php-mbstring** | Adds multi-byte string handling, required by many web frameworks. | -| **php-opcache** | Boosts performance by caching precompiled PHP bytecode in memory, reducing runtime overhead. | -| **apache2** | Installs the Apache HTTP web server, which will serve PHP files via mod_php or FastCGI. | - - -### Enable and start Apache: -Once Apache is installed, you need to enable and start the service so that it runs automatically on boot and begins serving HTTP requests immediately. + +Here’s what each package in the installation command does: + +- `php`: provides the core PHP interpreter for running web applications +- `php-cli`: allows you to run PHP scripts from the command line, which is useful for testing and automation tasks +- `php-fpm`: installs the FastCGI Process Manager, which manages PHP worker processes and helps improve concurrency and performance +- `php-mysql`: enables PHP to connect to MySQL or MariaDB databases +- `php-xml`: adds support for parsing and manipulating XML data +- `php-mbstring`: enables multi-byte string handling, which is required by many web frameworks +- `php-opcache`: improves performance by caching precompiled PHP bytecode in memory, reducing runtime overhead +- `apache2`: installs the Apache HTTP web server, which serves PHP files using either mod_php or FastCGI + + +## Enable and start Apache +After installing Apache, enable and start the service so it runs automatically on boot and begins serving HTTP requests: ```console sudo systemctl enable apache2 sudo systemctl start apache2 sudo systemctl status apache2 ``` -If everything starts correctly, the output should look similar to: +If everything starts correctly, the output is similar to: ```output ● 
apache2.service - The Apache Webserver @@ -65,17 +65,25 @@ Oct 15 18:55:30 pareena-php-test systemd[1]: Starting The Apache Webserver... Oct 15 18:55:30 pareena-php-test systemd[1]: Started The Apache Webserver. ``` -### Verify PHP installation +## Verify PHP installation After installation, verify that PHP is installed correctly and view the installed version: ```console php -v ``` -You should see output similar to: +The output is similar to: ```output PHP 8.0.30 (cli) (built: Nov 25 2024 12:00:00) ( NTS ) Copyright (c) The PHP Group Zend Engine v4.0.30, Copyright (c) Zend Technologies with Zend OPcache v8.0.30, Copyright (c), by Zend Technologies ``` + +{{% notice success %}} +PHP is installed and ready for use on your Arm-based SUSE VM. +{{% /notice %}} -You can now proceed to the baseline testing section, where you’ll create and load a simple PHP web page to confirm that Apache and PHP are working together on your SUSE Arm-based virtual machine. +## What's next? + +You've installed PHP, Apache, and essential PHP extensions on your SUSE Arm-based virtual machine. Apache is running and ready to serve dynamic PHP applications. You verified your PHP installation and confirmed that your environment is set up for web development on Arm. + +You can move on to the baseline testing section, where you'll create and load a simple PHP web page to confirm that Apache and PHP are working together on your SUSE Arm-based virtual machine. 
diff --git a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/instance.md b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/instance.md index 21d515ef80..e7b1e2fe77 100644 --- a/content/learning-paths/servers-and-cloud-computing/php-on-gcp/instance.md +++ b/content/learning-paths/servers-and-cloud-computing/php-on-gcp/instance.md @@ -1,5 +1,5 @@ --- -title: Create a Google Axion C4A Arm virtual machine on GCP +title: Provision a Google Axion C4A Arm virtual machine on GCP weight: 3 ### FIXED, DO NOT MODIFY @@ -8,24 +8,36 @@ layout: learningpathall ## Overview -In this section, you will learn how to provision a Google Axion C4A Arm virtual machine on Google Cloud Platform (GCP) using the `c4a-standard-4` (4 vCPUs, 16 GB memory) machine type in the Google Cloud Console. +In this section, you’ll provision a Google Cloud Axion C4A Arm virtual machine (VM) using the `c4a-standard-4` (four vCPUs, 16 GB memory) machine type in the Google Cloud Console. This process sets up a high-performance Arm server for PHP workloads, leveraging the scalability and efficiency of Axion C4A and SUSE Linux on Google Cloud. {{% notice Note %}} For support on GCP setup, see the Learning Path [Getting started with Google Cloud Platform](https://learn.arm.com/learning-paths/servers-and-cloud-computing/csp/google/). {{% /notice %}} -## Provision a Google Axion C4A Arm VM in Google Cloud Console +## Provision your Google Axion C4A instance -To create a virtual machine based on the C4A instance type: -- Navigate to the [Google Cloud Console](https://console.cloud.google.com/). -- Go to Compute Engine > VM Instances and select Create Instance. -- Under Machine configuration: - - Populate fields such as Instance name, Region, and Zone. - - Set **Series** to `C4A`. - - Select `c4a-standard-4` for machine type. +Follow these steps to create a `c4a-standard-4` instance configured for PHP on Arm. 
This walkthrough covers machine selection, OS image choice, and basic networking. Ensure your Google Cloud project has billing enabled and you have permission to create Compute Engine instances. - ![Create a Google Axion C4A Arm virtual machine in the Google Cloud Console with c4a-standard-4 selected alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console") -- Under **OS and Storage**, select **Change**, then choose an Arm64-based OS image. For this Learning Path, use **SUSE Linux Enterprise Server**. Pick the preferred version for your Operating System. Ensure you select the **Arm image** variant. Click **Select**. -- Under **Networking**, enable **Allow HTTP traffic**. -- Click **Create** to launch the instance. +Follow these steps to get started: + +- Go to the [Google Cloud Console](https://console.cloud.google.com/) +- In the left menu, select **Compute Engine** > **VM Instances** +- Select **Create Instance** +- Under **Machine configuration** + - Enter your **Instance name**, **Region**, and **Zone** + - Set **Series** to `C4A` + - Set **Machine type** to `c4a-standard-4` (four vCPUs, 16 GB memory) as shown below: + + ![Screenshot of Google Cloud Console showing c4a-standard-4 selected for Axion C4A Arm VM creation. alt-text#center](images/gcp-vm.png "Creating a Google Axion C4A Arm virtual machine in Google Cloud Console") + +- Under **OS and Storage**, select **Change**: + - Choose an Arm64-based OS image; for this Learning Path, select **SUSE Linux Enterprise Server** + - Select your preferred version and ensure you choose the **Arm image** variant + - Select **OK** +- Under **Networking**, enable **Allow HTTP traffic** +- Select **Create** to launch the instance + +## What's next? + +You’ve successfully provisioned an Arm-based Axion C4A VM on Google Cloud. Your server is ready for PHP installation and cloud-native development on Arm. 
Next, you’ll install PHP and configure your environment to run dynamic web applications on your new Arm server. diff --git a/content/learning-paths/servers-and-cloud-computing/profiling-for-neoverse/_index.md b/content/learning-paths/servers-and-cloud-computing/profiling-for-neoverse/_index.md index 5b9969ac69..4678c5761a 100644 --- a/content/learning-paths/servers-and-cloud-computing/profiling-for-neoverse/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/profiling-for-neoverse/_index.md @@ -13,7 +13,6 @@ prerequisites: - An Arm Neoverse-based (N1, N2 or V1) computer running Linux. For your host OS, you can use Amazon Linux 2023 or newer, Debian 10 or newer, RHEL 8 or newer, or Ubuntu 20.04 or newer. author: Julie Gaskin -author: Julie Gaskin ### Tags skilllevels: Introductory diff --git a/content/learning-paths/servers-and-cloud-computing/pytorch-llama/pytorch-llama-frontend.md b/content/learning-paths/servers-and-cloud-computing/pytorch-llama/pytorch-llama-frontend.md index ffae9d430a..8f2442978b 100644 --- a/content/learning-paths/servers-and-cloud-computing/pytorch-llama/pytorch-llama-frontend.md +++ b/content/learning-paths/servers-and-cloud-computing/pytorch-llama/pytorch-llama-frontend.md @@ -74,3 +74,15 @@ Collecting usage statistics. To deactivate, set browser.gatherUsageStats to fals Open the local URL from the link above in a browser and you should see the chatbot running: ![Chatbot](images/chatbot.png) + +{{% notice Note %}} +If you are running a server in the cloud, the local URL may not connect when starting the frontend server. If this happens, stop the frontend server and reconnect to your instance using port forwarding (see code below). After reconnecting, activate the `venv` and start the Streamlit frontend server. 
+ +```sh +# Replace with your .pem file and machine's public IP +ssh -i /path/to/your/key.pem -L 8501:localhost:8501 ubuntu@<public-ip> +source torch_env/bin/activate +cd torchchat +streamlit run browser/browser.py +``` +{{% /notice %}} \ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/rag/_demo.md b/content/learning-paths/servers-and-cloud-computing/rag/_demo.md index ca62fbf8e4..6beba4a9b0 100644 --- a/content/learning-paths/servers-and-cloud-computing/rag/_demo.md +++ b/content/learning-paths/servers-and-cloud-computing/rag/_demo.md @@ -33,8 +33,6 @@ rag_data_cutoff_date: 2025/01/17 title_chatbot_area: Arm RAG Demo -prismjs: true - ### Specific details to this demo @@ -55,7 +53,6 @@ tps_ranges: ### FIXED, DO NOT MODIFY # ================================================================================ demo_template_name: llm_chatbot_rag_demo # allows the 'demo.html' partial to route to the correct Configuration and Demo/Stats sub partials for page render. -weight: 2 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. --- diff --git a/content/learning-paths/servers-and-cloud-computing/ran/_index.md b/content/learning-paths/servers-and-cloud-computing/ran/_index.md index 0839b2b2b2..0bf4a8e6eb 100644 --- a/content/learning-paths/servers-and-cloud-computing/ran/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/ran/_index.md @@ -59,5 +59,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
-layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/snappy/_index.md b/content/learning-paths/servers-and-cloud-computing/snappy/_index.md index 0956ccf203..3bf9309db0 100644 --- a/content/learning-paths/servers-and-cloud-computing/snappy/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/snappy/_index.md @@ -48,5 +48,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/sve2-match/_index.md b/content/learning-paths/servers-and-cloud-computing/sve2-match/_index.md index 8e9369652f..cfb3cb802e 100644 --- a/content/learning-paths/servers-and-cloud-computing/sve2-match/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/sve2-match/_index.md @@ -46,5 +46,4 @@ further_reading: weight: 1 # _index.md always has weight of 1 to order correctly layout: "learningpathall" # All files under learning paths have this same wrapper learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. -layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md index 4e5022d2bb..eb0f2a17f6 100644 --- a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md +++ b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md @@ -34,32 +34,36 @@ Use a text editor to save the Python script below in a file called `batch.py`: import json from vllm import LLM, SamplingParams -# Sample prompts. 
-prompts = [ - "Write a hello world program in C", - "Write a hello world program in Java", - "Write a hello world program in Rust", -] - -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=256) - -# Create an LLM. -llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct", dtype="bfloat16") - -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) - -# Print the outputs. -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - result = { - "Prompt": prompt, - "Generated text": generated_text - } - print(json.dumps(result, indent=4)) +if __name__ == '__main__': + # Sample prompts. + prompts = [ + "Write a hello world program in C", + "Write a hello world program in Java", + "Write a hello world program in Rust", + ] + + # Modify model here + MODEL = "Qwen/Qwen2.5-0.5B-Instruct" + + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=256) + + # Create an LLM. + llm = LLM(model=MODEL, dtype="bfloat16", max_num_batched_tokens=32768) + + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + result = { + "Prompt": prompt, + "Generated text": generated_text + } + print(json.dumps(result, indent=4)) ``` The script uses `bfloat16` precision. @@ -75,36 +79,62 @@ python ./batch.py The output shows vLLM starting, the model loading, and the batch processing of the three prompts: ```output -INFO 12-12 22:52:57 config.py:441] This model supports multiple tasks: {'generate', 'reward', 'embed', 'score', 'classify'}. Defaulting to 'generate'. 
-WARNING 12-12 22:52:57 config.py:567] Async output processing is not supported on the current platform type cpu. -WARNING 12-12 22:52:57 cpu.py:56] CUDA graph is not supported on CPU, fallback to the eager mode. -WARNING 12-12 22:52:57 cpu.py:68] Environment variable VLLM_CPU_KVCACHE_SPACE (GB) for CPU backend is not set, using 4 by default. -INFO 12-12 22:52:57 importing.py:15] Triton not installed or not compatible; certain GPU-related functions will not be available. -INFO 12-12 22:52:57 llm_engine.py:250] Initializing an LLM engine (v0.6.4.post2.dev322+g72ff3a96) with config: VllmConfig(model_config=, cache_config=, parallel_config=ParallelConfig(pipeline_parallel_size=1, tensor_parallel_size=1, worker_use_ray=False, max_parallel_loading_workers=None, disable_custom_all_reduce=False, tokenizer_pool_config=None, ray_workers_use_nsight=False, placement_group=None, distributed_executor_backend=None, worker_cls='vllm.worker.cpu_worker.CPUWorker', sd_worker_cls='auto', world_size=1, rank=0), scheduler_config=SchedulerConfig(runner_type='generate', max_num_batched_tokens=32768, max_num_seqs=256, max_model_len=32768, num_lookahead_slots=0, delay_factor=0.0, enable_chunked_prefill=False, is_multimodal_model=False, preemption_mode=None, num_scheduler_steps=1, multi_step_stream_outputs=True, send_delta_data=False, policy='fcfs', chunked_prefill_enabled=False), device_config=, load_config=LoadConfig(load_format=, download_dir=None, model_loader_extra_config=None, ignore_patterns=['original/**/*']), lora_config=None, speculative_config=None, decoding_config=DecodingConfig(guided_decoding_backend='xgrammar'), observability_config=ObservabilityConfig(otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), prompt_adapter_config=None, quant_config=None, compilation_config=CompilationConfig(level=0, debug_dump_path='', backend='', custom_ops=[], splitting_ops=['vllm.unified_attention', 'vllm.unified_attention_with_output'], 
use_inductor=True, candidate_compile_sizes=[], inductor_compile_config={}, inductor_passes={}, use_cudagraph=False, cudagraph_num_of_warmups=0, cudagraph_capture_sizes=None, cudagraph_copy_inputs=False, pass_config=PassConfig(dump_graph_stages=[], dump_graph_dir=PosixPath('.'), enable_fusion=True, enable_reshape=True), compile_sizes=[], capture_sizes=[256, 248, 240, 232, 224, 216, 208, 200, 192, 184, 176, 168, 160, 152, 144, 136, 128, 120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 4, 2, 1], enabled_custom_ops=Counter(), disabled_custom_ops=Counter(), compilation_time=0.0, static_forward_context={}), kv_transfer_config=None, instance_id='5c715'),use_cached_outputs=False, -INFO 12-12 22:52:58 cpu.py:33] Cannot use _Backend.FLASH_ATTN backend on CPU. -INFO 12-12 22:52:58 selector.py:141] Using Torch SDPA backend. -INFO 12-12 22:52:58 weight_utils.py:243] Using model weights format ['*.safetensors'] -INFO 12-12 22:52:58 weight_utils.py:288] No model.safetensors.index.json found in remote. +INFO 10-23 18:38:40 [__init__.py:216] Automatically detected platform cpu. +INFO 10-23 18:38:42 [utils.py:233] non-default args: {'dtype': 'bfloat16', 'max_num_batched_tokens': 32768, 'disable_log_stats': True, 'model': 'Qwen/Qwen2.5-0.5B-Instruct'} +INFO 10-23 18:38:42 [model.py:547] Resolved architecture: Qwen2ForCausalLM +`torch_dtype` is deprecated! Use `dtype` instead! +INFO 10-23 18:38:42 [model.py:1510] Using max model len 32768 +WARNING 10-23 18:38:42 [cpu.py:117] Environment variable VLLM_CPU_KVCACHE_SPACE (GiB) for CPU backend is not set, using 4 by default. +INFO 10-23 18:38:42 [arg_utils.py:1166] Chunked prefill is not supported for ARM and POWER and S390X CPUs; disabling it for V1 backend. +INFO 10-23 18:38:44 [__init__.py:216] Automatically detected platform cpu. +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:46 [core.py:644] Waiting for init message from front-end. 
+(EngineCore_DP0 pid=8933) INFO 10-23 18:38:46 [core.py:77] Initializing a V1 LLM engine (v0.11.0) with config: model='Qwen/Qwen2.5-0.5B-Instruct', speculative_config=None, tokenizer='Qwen/Qwen2.5-0.5B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=32768, download_dir=None, load_format=auto, tensor_parallel_size=1, pipeline_parallel_size=1, data_parallel_size=1, disable_custom_all_reduce=True, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cpu, structured_outputs_config=StructuredOutputsConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_parser=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2.5-0.5B-Instruct, enable_prefix_caching=True, chunked_prefill_enabled=False, pooler_config=None, compilation_config={"level":2,"debug_dump_path":"","cache_dir":"","backend":"inductor","custom_ops":["none"],"splitting_ops":null,"use_inductor":true,"compile_sizes":null,"inductor_compile_config":{"enable_auto_functionalized_v2":false,"dce":true,"size_asserts":false,"nan_asserts":false,"epilogue_fusion":true},"inductor_passes":{},"cudagraph_mode":0,"use_cudagraph":true,"cudagraph_num_of_warmups":0,"cudagraph_capture_sizes":[],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"use_inductor_graph_partition":false,"pass_config":{},"max_capture_size":null,"local_cache_dir":null} +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:46 [importing.py:63] Triton not installed or not compatible; certain GPU-related functions will not be available. +(EngineCore_DP0 pid=8933) WARNING 10-23 18:38:47 [cpu.py:316] Pin memory is not supported on CPU. +[Gloo] Rank 0 is connected to 0 peer ranks. 
Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0 +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:47 [parallel_state.py:1208] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0 +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:47 [cpu_model_runner.py:106] Starting to load model Qwen/Qwen2.5-0.5B-Instruct... +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:47 [cpu.py:104] Using Torch SDPA backend. +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:47 [weight_utils.py:392] Using model weights format ['*.safetensors'] +(EngineCore_DP0 pid=8933) INFO 10-23 18:38:47 [weight_utils.py:450] No model.safetensors.index.json found in remote. Loading safetensors checkpoint shards: 0% Completed | 0/1 [00:00\n\nint main() {\n printf(\"Hello, World!\\n\");\n return 0;\n}\n```\n\nThis program does the following:\n\n1. Includes the `` header to use the `printf` function.\n2. 
Defines a `main` function, which is the entry point of the program.\n3. Uses `printf` to output the message \"Hello, World!\" to the console.\n4. Returns 0 to indicate that the program executed successfully.\n\nWhen you run this program, you should see the output:\n\n```\nHello, World!\n``` \n\nThis is the basic structure of a C program, providing a simple example of how to create, run, and display a basic program. Note that C is a high-level programming language, meaning that it provides low-level operations for users to interact with the hardware, but at the same time, it is a low-level language that needs to be compiled and linked into an executable file (.exe) that the computer's operating system can load and run. C, as a compiled language, often requires additional libraries and tools for use. For more information, you can refer to the C Programming Language documentation." + "Generated text": "++ to print \"Hello, World!\" on the console.\n\n```cpp\n#include \n\nint main() {\n std::cout << \"Hello, World!\" << std::endl;\n return 0;\n}\n```\n\nThis program demonstrates the use of the `std::cout` stream object in C++ to output text to the console. The `<<` operator is used to print the text \"Hello, World!\" to the console, followed by a newline character (`std::endl`). The `return 0;` statement indicates that the program should exit with a success code. The `main` function is the entry point of the program. When executed, the `main` function will invoke the `std::cout` object and print \"Hello, World!\" to the console. The `return 0;` statement indicates that the program is successful and should not throw any errors." } { "Prompt": "Write a hello world program in Java", - "Generated text": "\n\nCertainly! 
Below is a simple `HelloWorld.java` file that prints \"Hello, World!\" to the console when you run it:\n\n```java\npublic class HelloWorld {\n public static void main(String[] args) {\n System.out.println(\"Hello, World!\");\n }\n}\n```\n\nTo compile this program, you would use an integrated development environment (IDE) like IntelliJ IDEA, Eclipse, or NetBeans. Here is how you can compile it:\n\n1. Open a terminal or command prompt.\n2. Navigate to the directory where you saved the `HelloWorld.java` file.\n3. Compile the program using the following command:\n ```bash\n javac HelloWorld.java\n ```\n4. Run the compiled program using the following command:\n ```bash\n java HelloWorld\n ```\n\nThis will output:\n```\nHello, World!\n```" + "Generated text": "\n\nSure! Here is a simple \"Hello World\" program in Java:\n\n```java\npublic class HelloWorld {\n public static void main(String[] args) {\n System.out.println(\"Hello World!\");\n }\n}\n```\n\nTo run this program, simply compile it using the Java compiler:\n\n```\njavac HelloWorld.java\n```\n\nThen run it using the `java` command:\n\n```\njava HelloWorld\n```\n\nYou should see the message \"Hello World!\" printed to the console. \n\nThis is a basic example of how to write a Java program. Java is a popular programming language and there are many other examples and libraries available for more advanced programming tasks. \n\nIf you're new to Java, you might want to start with the official Java tutorials or the official Java documentation. There are also many online resources and communities that can help you learn Java. For a complete introduction, I recommend checking out the Java Tutorial on Codecademy. \n\nLet me know if you have any more questions!" } { "Prompt": "Write a hello world program in Rust", - "Generated text": "\n\nCertainly! 
Here is a simple example of a `HelloWorld` program in Rust:\n\n```rust\nfn main() {\n println!(\"Hello, world!\");\n}\n```\n\n### Explanation:\n\n- `fn main()`: This is the entry point of the program.\n- `println!`: This function is used to print out the message `Hello, world!` to the console.\n- `println!`: The `println!` macro is used to print messages in Rust.\n\n### How to Run the Program:\n\n1. Make sure you have Rust installed on your system.\n2. Save the above code in a file with a `.rs` extension, e.g., `hello.rs`.\n3. Open a terminal or command prompt and navigate to the directory where the file is saved.\n4. Run the program by typing `rustc hello.rs` (if you're using `rustc`, you don't need to specify the file extension).\n5. After the program runs, it should print the message `Hello, world!` to the console.\n\n### Running in Development:\n\nIf you want to run the program in development mode to see the output in the terminal, you can use the `-o` flag:\n\n```sh\nrustc -o hello-dev hello.rs\n./" + "Generated text": ".\nCertainly! Here's a simple \"Hello, World!\" program in Rust:\n\n```rust\nfn main() {\n println!(\"Hello, World!\");\n}\n```\n\nThis program defines a `main` function that runs when the program is executed. Inside the `main` function, the `println!` macro is used to print the string \"Hello, World!\" to the console. \n\nYou can save this code in a file with a `.rs` extension, for example `hello.rs`, and run it using the command `rustc hello.rs`, which will compile and run the program. When you run the program, you should see the output \"Hello, World!\" printed to the console. \n\nIn Rust, the `main` function is the entry point of the program, and the program starts executing from there. The `println!` macro is a function that prints a string to the console. Other important functions in Rust include `println!`, `printlnln`, `println!`, `printlnln`, `println!`, and `printlnln`, which provide similar functionality for different purposes. 
\n\nYou can also use the `println!` macro to print more complex data structures to the console, such as arrays, slices, strings, numbers, booleans, and" } ``` -You can try with other prompts and models such as `meta-llama/Llama-3.2-1B`. - -Continue to learn how to set up an OpenAI-compatible server. +You can try with other prompts and models such as `meta-llama/Llama-3.2-1B`. Continue to learn how to set up an OpenAI-compatible server. diff --git a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md index c98aa9e170..68b848503e 100644 --- a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md +++ b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md @@ -10,7 +10,7 @@ Instead of a batch run from Python, you can create an OpenAI-compatible server. Running a local LLM offers several advantages: -* Cost-effective - it avoids the costs associated with using external APIs, especially for high-usage scenarios.   +* Cost-effective - it avoids the costs associated with using external APIs, especially for high-usage scenarios. * Privacy - it keeps your data and prompts within your local environment, which enhances privacy and security. * Offline Capability - it enables operation without an internet connection, making it ideal for scenarios with limited or unreliable network access. 
@@ -19,7 +19,7 @@ OpenAI compatibility means that you can reuse existing software which was design Run vLLM with the same `Qwen/Qwen2.5-0.5B-Instruct` model: ```bash -python3 -m vllm.entrypoints.openai.api_server --model Qwen/Qwen2.5-0.5B-Instruct --dtype float16 +python3 -m vllm.entrypoints.openai.api_server --model Qwen/Qwen2.5-0.5B-Instruct --dtype float16 --max-num-batched-tokens 32768 ``` The server output displays that it is ready for requests: diff --git a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md index 109cc5e340..4f5a8d9b85 100644 --- a/content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md +++ b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md @@ -8,7 +8,25 @@ layout: learningpathall ## Before you begin -To follow the instructions for this Learning Path, you will need an Arm server running Ubuntu 24.04 LTS with at least 8 cores, 16GB of RAM, and 50GB of disk storage. +To follow the instructions for this Learning Path, you will need an Arm server running Ubuntu 24.04 LTS with at least 8 cores, 16GB of RAM, and 50GB of disk storage. You also need a system which supports BFloat16. + +To check if your system includes BFloat16, use the `lscpu` command: + +```console +lscpu | grep bf16 +``` + +If the `Flags` are printed, you have a processor with BFloat16. + +```output +Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 asimddp sha512 sve asimdfhm dit uscat ilrcpc flagm sb paca pacg dcpodp sve2 sveaes svepmull svebitperm svesha3 flagm2 frint svei8mm svebf16 i8mm bf16 dgh rng bti +``` + +If the result is blank, you do not have a processor with BFloat16. + +BFloat16 provides improved performance and smaller memory footprint with the same dynamic range. 
You might experience a drop in model inference accuracy with BFloat16, but the impact is acceptable for the majority of applications. + +The instructions have been tested on an AWS Graviton3 `m7g.2xlarge` instance. ## What is vLLM? @@ -24,13 +42,7 @@ First, ensure your system is up-to-date and install the required tools and libra ```bash sudo apt-get update -y -sudo apt-get install -y curl ccache git wget vim numactl gcc-12 g++-12 python3 python3-pip python3-venv python-is-python3 libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 libssl-dev pkg-config -``` - -Set the default GCC to version 12: - -```bash -sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 +sudo apt-get install -y curl ccache git wget vim numactl gcc g++ python3 python3-pip python3-venv python-is-python3 libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 libssl-dev pkg-config ``` Next, install Rust. For more information, see the [Rust install guide](/install-guides/rust/). @@ -74,7 +86,7 @@ First, clone the vLLM repository from GitHub: ```bash git clone https://github.com/vllm-project/vllm.git cd vllm -git checkout 72ff3a968682e6a3f7620ab59f2baf5e8eb2777b +git checkout releases/v0.11.0 ``` {{% notice Note %}} @@ -86,8 +98,8 @@ Omit this command to use the latest code on the main branch. Install the Python packages for vLLM: ```bash -pip install -r requirements-build.txt -pip install -v -r requirements-cpu.txt +pip install -r requirements/build.txt +pip install -v -r requirements/cpu.txt ``` Build vLLM using Pip: @@ -104,4 +116,4 @@ rm -rf dist cd .. ``` -You are now ready to download an LLM and run vLLM. +You are now ready to download a large language model (LLM) and run vLLM. 
diff --git a/themes/arm-design-system-hugo-theme/layouts/robots.txt b/themes/arm-design-system-hugo-theme/layouts/robots.txt index e009a4b71e..4b884f01ef 100644 --- a/themes/arm-design-system-hugo-theme/layouts/robots.txt +++ b/themes/arm-design-system-hugo-theme/layouts/robots.txt @@ -1,6 +1,28 @@ User-agent: * +Disallow: + +# Explicitly welcome AI crawlers +User-agent: GPTBot +Allow: / + +User-agent: Google-Extended +Allow: / + +User-agent: ClaudeBot +Allow: / + +User-agent: PerplexityBot +Allow: / + +User-agent: anthropic-ai +Allow: / + +User-agent: Bytespider +Allow: / + +User-agent: CCBot Allow: / -Disallow: /*? +# Sitemaps help AI discovery Sitemap: https://learn.arm.com/sitemap.xml -Image Sitemap: https://learn.arm.com/learn-image-sitemap.xml \ No newline at end of file +Sitemap: https://learn.arm.com/learn-image-sitemap.xml \ No newline at end of file diff --git a/tools/maintenance.py b/tools/maintenance.py index 82ffcbbfd8..f5f1999b1f 100755 --- a/tools/maintenance.py +++ b/tools/maintenance.py @@ -22,6 +22,7 @@ Test Learning Path """ def check_lp(lp_path, link, debug): + test_image_results = None # initialize the variable if not os.path.isdir(lp_path): lp_path = os.path.dirname(lp_path) @@ -156,10 +157,13 @@ def main(): if args.stats_report: # If all test results are zero, all tests have passed patch.patch(args.instructions, results_dict, args.link) - if all(results_dict.get(k) for k in results_dict): - # Errors exist - logging.info("Tests failed in test suite") - sys.exit(1) + if results_dict is not None: + if all(results_dict.get(k) for k in results_dict): + # Errors exist + logging.info("Tests failed in test suite") + sys.exit(1) + else: + pass elif args.spelling: logging.info(f"Checking spelling of {args.spelling}") output = parse.spelling(args.spelling) diff --git a/tools/patch.py b/tools/patch.py index 4acf8dff40..2ecbd5dcb9 100644 --- a/tools/patch.py +++ b/tools/patch.py @@ -16,7 +16,7 @@ def patch(article_path: str, results: dict, link: str): 
article_path_pure = PurePath(re.sub(r"^.*?content/", "", article_path)) article_path_parts = list(article_path_pure.parts) if "learning-paths" in article_path_parts: - content_type, sw_category, content_title = article_path_parts + content_type, sw_category, content_title, *others = article_path_parts article_path = PurePath(article_path, "_index.md") elif "install-guides" in article_path_parts: # In case the install guide is in a subdirectory