diff --git a/.github/workflows/test-lp.yml b/.github/workflows/test-lp.yml index c4fe435409..adb71197d7 100644 --- a/.github/workflows/test-lp.yml +++ b/.github/workflows/test-lp.yml @@ -14,10 +14,40 @@ jobs: hugo - name: Get all changed markdown files id: changed-markdown-files - uses: step-security/changed-files@v45 + uses: tj-actions/changed-files@v46 with: files: | **.md + - name: Check for capital letters or spaces in content directory + run: | + echo "Checking for capital letters or spaces in content directory paths (excluding file extensions)..." + + tmpfile=$(mktemp) + + git diff --name-only origin/${{ github.base_ref }}...HEAD | + grep '^content/' | + while read -r path; do + name=$(basename "$path") + + # Strip file extension if it exists + base="${name%.*}" + + if [[ "$base" =~ [A-Z] || "$base" =~ [[:space:]] ]]; then + echo "Invalid name: $path" + echo "$path" >> "$tmpfile" + fi + done + + if [[ -s "$tmpfile" ]]; then + echo "❌ One or more files or directories in 'content/' contain capital letters or spaces (excluding extensions):" + cat "$tmpfile" + rm "$tmpfile" + exit 1 + else + rm "$tmpfile" + echo "✅ No capital letters or spaces found in 'content/' paths." + fi + - name: Install dependencies if: steps.changed-markdown-files.outputs.any_changed == 'true' run: pip install -r tools/requirements.txt @@ -26,7 +56,7 @@ jobs: if: steps.changed-markdown-files.outputs.any_changed == 'true' # Run the test suite run: | - set -o pipefail; ./tools/test_lp.sh ${{ steps.changed-markdown-files.outputs.all_changed_files }} 2>&1 | tee test-lp-output.txt + set -o pipefail; ./tools/test_lp.sh ${{ steps.changed-markdown-files.outputs.all_changed_files }} 2>&1 | tee test-lp-output.txt - name: Parse test suite errors id: test-suite-state if: success() diff --git a/.wordlist.txt b/.wordlist.txt index f5dcabd506..232cca1f39 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -4008,4 +4008,57 @@ sdks sendgrid soafee timestamping -transactional \ No newline at end of file +transactional +Biquad +CFFT +Christophe +Corriero +EBS +Favergeon +Fio +HDDs +Hanning +IFFT +Microbenchmark +NVMe +Nerdctl +NoiseSuppression +NoiseSuppressionReference +Paladugu +Phalani +PythonWrapper +Rani +RelWithDebInfo +Rescaling +SNR +VisualStudioSetup +WebUI +buildctl +channelwise +checksums +cmsisdsp +fio +frac +hanning +hdd +iops +iostat +iotop +microbenchmark +microbenchmarking +nerdctl +nr +nvme +observability +operationscount +paddings +pidstat +preloads +recordscount +rescaled +rescaling +subnoise +transcoders +transcoding +upi +windowsdeveloper \ No newline at end of file diff --git a/assets/contributors.csv b/assets/contributors.csv index 1d0b48cc52..61a086e88e 100644 --- a/assets/contributors.csv +++ b/assets/contributors.csv @@ -84,3 +84,5 @@ Shuheng Deng,Arm,,,, Yiyang Fan,Arm,,,, Julien Jayat,Arm,,,, Geremy Cohen,Arm,geremyCohen,geremyinanutshell,, +Barbara Corriero,Arm,,,, + diff --git a/config.toml b/config.toml index ee086f99cc..a262d66618 100644 --- a/config.toml +++ b/config.toml @@ -27,6 +27,12 @@ cloudFrontDistributionID = "E2NEF61QWPFRIH" [markup.goldmark] [markup.goldmark.renderer] unsafe = true + [markup.goldmark.extensions] + [markup.goldmark.extensions.passthrough] + enable = true + [markup.goldmark.extensions.passthrough.delimiters] + block = [['\[', '\]'], ['$$', '$$']] + inline = [['\(', '\)']] [frontmatter] lastmod = ["lastmod", ":git", "date", "publishDate"] @@ -83,3 +89,5 @@ title = 'Arm Learning Paths' description = 'Tutorials with code examples, created by the Arm ecosystem to develop better 
code faster across all platforms: Servers, phones, laptops, embedded devices, and microcontrollers.' social_image = '/img/social-image.png' twitter_handle = '@ArmSoftwareDev' + +math = true diff --git a/content/install-guides/acfl.md b/content/install-guides/acfl.md index b68016d207..a245253464 100644 --- a/content/install-guides/acfl.md +++ b/content/install-guides/acfl.md @@ -16,9 +16,6 @@ test_images: - fedora:latest test_link: null test_maintenance: true -test_status: -- passed -- passed title: Arm Compiler for Linux tool_install: true weight: 1 @@ -89,7 +86,7 @@ bash <(curl -L https://developer.arm.com/-/cdn-downloads/permalink/Arm-Compiler- Download releases from the command line using `wget`. Install `wget` if not present. -```console +```bash sudo apt install wget ``` @@ -117,7 +114,7 @@ Each command sequence includes accepting the license agreement to automate the i #### Ubuntu Linux: -```bash { target="ubuntu:latest", env="DEBIAN_FRONTEND=noninteractive" } +```bash { target="ubuntu:latest" } sudo -E apt-get -y install environment-modules python3 libc6-dev tar -xvf arm-compiler-for-linux_24.10.1_Ubuntu-22.04_aarch64.tar cd ./arm-compiler-for-linux_24.10.1_Ubuntu-22.04 @@ -126,10 +123,10 @@ sudo ./arm-compiler-for-linux_24.10.1_Ubuntu-22.04.sh --accept #### Red Hat Linux: -```bash { target="fedora:latest" } +```console sudo yum -y install environment-modules python3 glibc-devel tar -xvf arm-compiler-for-linux_24.10.1_RHEL-9_aarch64.tar -cd arm-compiler-for-linux_24.10.1_RHEL-9 +cd ./arm-compiler-for-linux_24.10.1_RHEL-9 sudo ./arm-compiler-for-linux_24.10.1_RHEL-9.sh --accept ``` @@ -150,25 +147,23 @@ Arm Compiler for Linux is available to install with the Ubuntu system package ma #### Set up the ACfL package repository -Add the ACfL `apt` package repository to your Ubuntu 20.04 or 22.04 system: +Add the ACfL `apt` package repository to your system. These instructions apply for Ubuntu 22.04. You can check the [available versions](https://developer.arm.com/packages/) for other releases. -```bash { target="ubuntu:latest" } +After that, the ACfL Ubuntu package repository is now ready to use. Run the commands below to install the dependencies needed. + + +```console sudo apt update -sudo apt install -y curl environment-modules python3 libc6-dev -. /etc/os-release -curl "https://developer.arm.com/packages/ACfL%3A${NAME}-${VERSION_ID/%.*/}/${VERSION_CODENAME}/Release.key" | sudo tee /etc/apt/trusted.gpg.d/developer-arm-com.asc -echo "deb https://developer.arm.com/packages/ACfL%3A${NAME}-${VERSION_ID/%.*/}/${VERSION_CODENAME}/ ./" | sudo tee /etc/apt/sources.list.d/developer-arm-com.list +sudo apt install -y wget gnupg gpg environment-modules python3 libc6-dev +wget -qO - https://developer.arm.com/packages/ACfL%3AUbuntu-22/jammy/Release.key | sudo tee /etc/apt/trusted.gpg.d/developer-arm-com.asc +echo "deb https://developer.arm.com/packages/ACfL%3AUbuntu-22/jammy/ ./" | sudo tee /etc/apt/sources.list.d/developer-arm-com.list sudo apt update ``` -The ACfL Ubuntu package repository is now ready to use. 
+Install Arm Compiler for Linux with: -#### Install ACfL - -Download and install Arm Compiler for Linux with: - -```bash { target="ubuntu:latest" } -sudo apt install acfl +```console +sudo apt install acfl -y ``` ### Amazon Linux 2023 @@ -179,25 +174,25 @@ Arm Compiler for Linux is available to install with either the `dnf` or `yum` sy Install ACfL and prerequisites from the Amazon Linux 2023 `rpm` package repository with `dnf`: -```bash -sudo dnf update -sudo dnf install 'dnf-command(config-manager)' procps psmisc make environment-modules -sudo dnf config-manager --add-repo https://developer.arm.com/packages/ACfL%3AAmazonLinux-2023/latest/ACfL%3AAmazonLinux-2023.repo -sudo dnf install acfl +```console +sudo dnf update -y +sudo dnf -y install 'dnf-command(config-manager)' procps psmisc make environment-modules +sudo dnf config-manager addrepo --from-repofile=https://developer.arm.com/packages/ACfL%3AAmazonLinux-2023/latest/ACfL%3AAmazonLinux-2023.repo +sudo dnf -y install acfl ``` Or using the equivalent `yum` commands: -```bash -sudo yum update -sudo yum install 'dnf-command(config-manager)' procps psmisc make environment-modules -sudo yum config-manager --add-repo https://developer.arm.com/packages/ACfL%3AAmazonLinux-2023/latest/ACfL%3AAmazonLinux-2023.repo -sudo yum install acfl +```console +sudo yum update -y +sudo yum -y install 'dnf-command(config-manager)' procps psmisc make environment-modules +sudo yum config-manager addrepo --from-repofile=https://developer.arm.com/packages/ACfL%3AAmazonLinux-2023/latest/ACfL%3AAmazonLinux-2023.repo +sudo yum -y install acfl ``` The ACfL tools are now ready to use. -### Red Hat Enterprise Linux (RHEL) 9 +### Red Hat Enterprise Linux (RHEL) 9 Arm Compiler for Linux is available to install with either the `dnf` or `yum` system package manager. @@ -205,20 +200,20 @@ Arm Compiler for Linux is available to install with either the `dnf` or `yum` sy Install ACfL and prerequisites from the RHEL 9 `rpm` package repository with `dnf`: -```bash -sudo dnf update -sudo dnf install 'dnf-command(config-manager)' procps psmisc make environment-modules -sudo dnf config-manager --add-repo https://developer.arm.com/packages/ACfL%3ARHEL-9/standard/ACfL%3ARHEL-9.repo -sudo dnf install acfl +```console +sudo dnf update -y +sudo dnf -y install 'dnf-command(config-manager)' procps psmisc make environment-modules +sudo dnf config-manager addrepo --from-repofile=https://developer.arm.com/packages/ACfL%3ARHEL-9/standard/ACfL%3ARHEL-9.repo +sudo dnf -y install acfl ``` Or using the equivalent `yum` commands: -```bash -sudo yum update -sudo yum install 'dnf-command(config-manager)' procps psmisc make environment-modules -sudo yum config-manager --add-repo https://developer.arm.com/packages/ACfL%3ARHEL-9/standard/ACfL%3ARHEL-9.repo -sudo yum install acfl +```console +sudo yum update -y +sudo yum -y install 'dnf-command(config-manager)' procps psmisc make environment-modules +sudo yum config-manager addrepo --from-repofile=https://developer.arm.com/packages/ACfL%3ARHEL-9/standard/ACfL%3ARHEL-9.repo +sudo yum -y install acfl ``` The ACfL tools are now ready to use. @@ -227,39 +222,35 @@ The ACfL tools are now ready to use. Arm Compiler for Linux uses environment modules to dynamically modify your user environment. Refer to the [Environment Modules documentation](https://lmod.readthedocs.io/en/latest/#id) for more information. -Set up the environment, for example, in your `.bashrc` and add module files. 
+Set up the environment, for example, in your `.bashrc` and add module files. Then, list the available modules: #### Ubuntu Linux: -```bash { target="ubuntu:latest" } -echo ". /usr/share/modules/init/bash" >> ~/.bashrc -echo "module use /opt/arm/modulefiles" >> ~/.bashrc -. ~/.bashrc +```bash { target="ubuntu:latest" pre_cmd=". /usr/share/modules/init/bash" pre_cmd="module use /opt/arm/modulefiles" } +echo ". /usr/share/modules/init/bash" >> $HOME/.bashrc +echo "module use /opt/arm/modulefiles" >> $HOME/.bashrc +source $HOME/.bashrc +module avail ``` #### Red Hat or Amazon Linux: -```bash { target="fedora:latest" } -echo ". /usr/share/Modules/init/bash" >> ~/.bashrc -echo "module use /opt/arm/modulefiles" >> ~/.bashrc -. ~/.bashrc -``` - -To list available modules: - -```bash { env_source="~/.bashrc" } +```console +echo ". /usr/share/Modules/init/bash" >> $HOME/.bashrc +echo "module use /opt/arm/modulefiles" >> $HOME/.bashrc +source $HOME/.bashrc module avail ``` To configure Arm Compiler for Linux: -```bash { env_source="~/.bashrc" } +```console module load acfl/24.10.1 ``` To configure GCC: -```bash { env_source="~/.bashrc" } +```console module load gnu/14.2.0 ``` @@ -318,7 +309,7 @@ ACfL is now [ready to use](#armclang). To get started with the Arm C/C++ Compiler and compile a simple application follow the steps below. Check that the correct compiler version is being used: -```bash { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```bash { env_source="$HOME/.bashrc", pre_cmd=". /usr/share/modules/init/bash; module use /opt/arm/modulefiles; module load acfl/24.10.1" } armclang --version ``` @@ -336,13 +327,13 @@ int main() Build the application with: -```console { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```console armclang hello.c -o hello ``` Run the application with: -```bash { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```console ./hello ``` @@ -356,7 +347,7 @@ Hello, C World! To get started with the Arm Fortran Compiler and compile a simple application follow the steps below. Check that the correct compiler version is being used: -```bash { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```bash { env_source="$HOME/.bashrc", pre_cmd=". 
/usr/share/modules/init/bash; module use /opt/arm/modulefiles; module load acfl/24.10.1" } armflang --version ``` @@ -370,12 +361,12 @@ end program hello ``` Build the application with: -```bash { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```console armflang hello.f90 -o hello ``` Run the application with: -```bash { env_source="~/.bashrc", pre_cmd="module load acfl/24.10.1" } +```console ./hello ``` diff --git a/content/install-guides/anaconda.md b/content/install-guides/anaconda.md index c74c73ed6b..91b5906759 100644 --- a/content/install-guides/anaconda.md +++ b/content/install-guides/anaconda.md @@ -15,8 +15,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: Anaconda tool_install: true weight: 1 diff --git a/content/install-guides/azure-cli.md b/content/install-guides/azure-cli.md index 14022f007b..345e8372a3 100644 --- a/content/install-guides/azure-cli.md +++ b/content/install-guides/azure-cli.md @@ -11,8 +11,6 @@ official_docs: https://learn.microsoft.com/en-us/cli/azure test_images: - ubuntu:latest test_maintenance: true -test_status: -- passed title: Azure CLI tool_install: true weight: 1 diff --git a/content/install-guides/dotnet.md b/content/install-guides/dotnet.md index c54aebae9d..6598c7d79e 100644 --- a/content/install-guides/dotnet.md +++ b/content/install-guides/dotnet.md @@ -11,8 +11,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: .NET SDK tool_install: true weight: 1 @@ -20,7 +18,7 @@ weight: 1 The [.NET SDK](https://dotnet.microsoft.com/en-us/) is a free, open-source, and cross-platform development environment that provides a broad set of tools and libraries for building applications. You can use it to create a variety of applications including web apps, mobile apps, desktop apps, and cloud services. -The .NET SDK is available for Linux distributions on Arm-based systems. +The .NET SDK is available for Linux distributions on Arm-based systems. ## What should I do before installing the .NET SDK on Arm Linux? @@ -44,7 +42,7 @@ There are two ways to install the .NET SDK on your computer: - Using the Linux package manager. - Using the install script. -Select the one that works best for you. +Select the one that works best for you. ### How can I install .NET SDK using the Linux package manager? @@ -68,11 +66,11 @@ To install the .NET SDK using a script, follow the instructions below: 1. Download the install script: -```bash +```bash wget https://dot.net/v1/dotnet-install.sh ``` -2. Run the script (it will install .NET SDK 8 under the folder .dotnet): +2. Run the script (it will install .NET SDK 8 under the folder .dotnet): You have some options to specify the version you want to install. @@ -104,9 +102,9 @@ You can also add the search path to your `$HOME/.bashrc` so it is set for all ne ## How do I verify the .NET SDK installation? -To check that the installation was successful, type: +To check that the installation was successful, type: -```bash +```bash dotnet --list-sdks ``` @@ -190,7 +188,7 @@ The expected output in the console is: Hello World! ``` -You are ready to use the .NET SDK on Arm Linux. +You are ready to use the .NET SDK on Arm Linux. You can find more information about .NET on Arm in the [AWS Graviton Technical Guide](https://github.com/aws/aws-graviton-getting-started/blob/main/dotnet.md). 
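To go one step further with the .NET SDK guide, the sketch below confirms that the SDK reports an Arm64 runtime identifier (RID) and publishes a small self-contained Arm64 build. It is a minimal sketch: the project name `hello-arm` is an illustrative choice, and the publish output path assumes .NET SDK 8, so adjust the `net8.0` segment for other SDK versions.

```bash
# Check the reported RID (expect linux-arm64 on 64-bit Arm Linux)
dotnet --info

# Create a console project and publish it as a self-contained Arm64 build
dotnet new console -o hello-arm
cd hello-arm
dotnet publish -c Release -r linux-arm64 --self-contained true

# Run the published executable (path assumes .NET SDK 8)
./bin/Release/net8.0/linux-arm64/publish/hello-arm
```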
diff --git a/content/install-guides/eksctl.md b/content/install-guides/eksctl.md index 172e71b03d..d72a281332 100644 --- a/content/install-guides/eksctl.md +++ b/content/install-guides/eksctl.md @@ -14,8 +14,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: AWS EKS CLI (eksctl) tool_install: true weight: 1 @@ -27,7 +25,7 @@ The EKS CLI is available for a variety of operating systems and Linux distributi ## What should I do before installing eksctl on Arm Linux and Windows on Arm? -This install guide provides a quick solution to install `eksctl` on Arm Linux and Windows on Arm. +This install guide provides a quick solution to install `eksctl` on Arm Linux and Windows on Arm. 1. Confirm you have an Arm machine @@ -45,11 +43,11 @@ aarch64 If you see a different result, you are not using an Arm computer running 64-bit Linux. -For Windows, confirm the Arm architecture by typing "Settings" in the Windows search box. +For Windows, confirm the Arm architecture by typing "Settings" in the Windows search box. When the settings appear, click System on the left side and then About at the bottom. -In the Device specifications section look for "ARM-based processor" in the System type area. +In the Device specifications section look for "ARM-based processor" in the System type area. 2. Install `kubectl` @@ -57,13 +55,13 @@ Install the Kubernetes command-line tool, `kubectl`, using the [Kubectl install ## How do I download and install eksctl on Arm Linux? -1. Download the `eksctl` package using `curl`: +1. Download the `eksctl` package using `curl`: ```bash { target="ubuntu:latest" } curl -sLO "https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_Linux_arm64.tar.gz" ``` -2. Install `eksctl` with: +2. Install `eksctl` with: ```bash { target="ubuntu:latest" } tar -xzf eksctl_Linux_arm64.tar.gz -C /tmp && rm eksctl_Linux_arm64.tar.gz @@ -104,11 +102,11 @@ The output will be similar to: `eksctl` relies on the AWS CLI being installed and configured. Use the [AWS CLI install guide](/install-guides/aws-cli/) to install the AWS CLI. The CLI provides the `aws` command. -You will also need to configure the AWS CLI using the `aws configure` or the `aws configure sso` command. There are multiple ways to configure the CLI, including environment variables, command-line options, and credentials files. Refer to the [Configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) for more details. +You will also need to configure the AWS CLI using the `aws configure` or the `aws configure sso` command. There are multiple ways to configure the CLI, including environment variables, command-line options, and credentials files. Refer to the [Configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html) for more details. ## How do I use eksctl to create a simple EKS cluster? -With your AWS account configured, run `eksctl` to create a cluster with 2 nodes with AWS Graviton processors: +With your AWS account configured, run `eksctl` to create a cluster with 2 nodes with AWS Graviton processors: ```console eksctl create cluster \ @@ -116,12 +114,12 @@ eksctl create cluster \ --region us-east-1 \ --node-type t4g.small \ --nodes 2 \ ---nodegroup-name node-group-1 +--nodegroup-name node-group-1 ``` -Use the AWS console to look at the resources associated with the cluster and monitor progress of cluster creation. 
+Use the AWS console to look at the resources associated with the cluster and monitor progress of cluster creation. -When the cluster is created, use `kubectl` to get the status of the nodes in the cluster. +When the cluster is created, use `kubectl` to get the status of the nodes in the cluster. ```console kubectl get nodes -o wide diff --git a/content/install-guides/forge.md b/content/install-guides/forge.md index a7b390ed42..209d3c3564 100644 --- a/content/install-guides/forge.md +++ b/content/install-guides/forge.md @@ -15,8 +15,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: Linaro Forge tool_install: true weight: 1 @@ -35,7 +33,7 @@ Linaro Forge runs on Linux hosts and multiple architectures. See the Linaro Forg This install guide assumes an Arm AArch64 platform running Ubuntu Linux. -## Download +## Download Download and extract the appropriate installation package from [Linaro Forge Downloads](https://www.linaroforge.com/downloadForge/). @@ -73,9 +71,9 @@ Run the Windows file executable to install the Linaro Forge Remote Client. ### Graphical installer -Optionally, you can run the installer executable with a graphical interface. +Optionally, you can run the installer executable with a graphical interface. -```bash +```console cd linaro-forge-24.0.3-linux-aarch64/ ./installer ``` @@ -171,4 +169,4 @@ perf-report -n 128 myapp ``` Two files `myapp_128p_.html` and `myapp_128p_.txt` will be created in the current directory. -Linaro Forge is now installed and ready to use. +Linaro Forge is now installed and ready to use. diff --git a/content/install-guides/gcc/arm-gnu.md b/content/install-guides/gcc/arm-gnu.md index b82d2b4253..fa04b1212e 100644 --- a/content/install-guides/gcc/arm-gnu.md +++ b/content/install-guides/gcc/arm-gnu.md @@ -12,9 +12,6 @@ test_images: - fedora:latest test_link: null test_maintenance: true -test_status: -- passed -- passed title: Arm GNU Toolchain tool_install: false weight: 4 @@ -30,19 +27,19 @@ However there are reasons you may wish to use earlier compiler versions, so olde ## How do I download the Arm GNU Toolchain? {#download} Arm GNU Toolchain releases consist of cross toolchains for the following host operating systems: - -Linux + +Linux * Available for x86_64 and AArch64 host architectures - * Available for bare-metal and Linux targets - -Windows + * Available for bare-metal and Linux targets + +Windows * Available for x86 host architecture only (compatible with x86_64) * Available for bare-metal and Linux targets - -macOS + +macOS * Available for x86_64 and Apple silicon (beta) host architectures * Available for bare-metal targets only - + Download the correct toolchain variant for your development needs from the [Arm Developer website](https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/downloads). ## How do I install the Arm GNU Toolchain on Linux? @@ -80,7 +77,7 @@ source ~/.bashrc ## How do I install the Arm GNU Toolchain on macOS? -Downloads for `macOS` are available as tar files (`.tar.xz`) and package files (`.pkg`). +Downloads for `macOS` are available as tar files (`.tar.xz`) and package files (`.pkg`). ### tar files For `.tar.xz` files, unpack the downloaded file to the install directory. @@ -89,7 +86,7 @@ tar xJf arm-gnu-toolchain---.tar.xz -C /path/to/inst ``` ### pkg files -For `.pkg` files use the installer. +For `.pkg` files use the installer. 
```console sudo installer -pkg arm-gnu-toolchain---.pkg -target / ``` @@ -114,7 +111,7 @@ The `/etc/paths` file is a list of paths to search. ### Apple Silicon -Here is a specific example for macOS with Apple Silicon and the AArch32 bare-metal target. +Here is a specific example for macOS with Apple Silicon and the AArch32 bare-metal target. ```console wget https://developer.arm.com/-/media/Files/downloads/gnu/14.2.rel1/binrel/arm-gnu-toolchain-14.2.rel1-darwin-arm64-arm-none-eabi.pkg @@ -136,15 +133,15 @@ For example, to install the tools silently, amend users `PATH` and add registry ```console gcc-arm---mingw-w64-i686-arm-none-eabi.exe /S /P /R ``` -The zip package is a backup to Windows installer for those who cannot run the installer. You can unzip the package and then invoke the tools directly. +The zip package is a backup to Windows installer for those who cannot run the installer. You can unzip the package and then invoke the tools directly. -## Setting up product license +## Setting up product license Arm GNU Toolchain is open sourced and freely available for use. No licenses need to be set up for use. To use the Arm GNU Toolchain in conjunction with [Arm Development Studio](https://developer.arm.com/Tools%20and%20Software/Arm%20Development%20Studio) you must [register the toolchain](https://developer.arm.com/documentation/101469/2022-0/Installing-and-configuring-Arm-Development-Studio/Register-a-compiler-toolchain). -## Get started +## Get started To verify the installation is correct enter: ```console diff --git a/content/install-guides/gcc/cross.md b/content/install-guides/gcc/cross.md index 5269ad4ff5..66dd1afae1 100644 --- a/content/install-guides/gcc/cross.md +++ b/content/install-guides/gcc/cross.md @@ -12,14 +12,11 @@ test_images: - fedora:latest test_link: null test_maintenance: true -test_status: -- passed -- passed title: Cross-compiler tool_install: false weight: 3 --- -GCC is available on all Linux distributions and can be installed using the package manager. +GCC is available on all Linux distributions and can be installed using the package manager. This covers `gcc` and `g++` for compiling C and C++ as a cross-compiler targeting the Arm architecture. @@ -27,9 +24,9 @@ This covers `gcc` and `g++` for compiling C and C++ as a cross-compiler targetin GCC is often used to cross-compile software for Arm microcontrollers and embedded devices which have firmware and other low-level software. The executables are `arm-none-eabi-gcc` and `arm-none-eabi-g++`. -GCC is also used to cross compile Linux applications. Applications can be compiled for 32-bit or 64-bit Linux systems. +GCC is also used to cross compile Linux applications. Applications can be compiled for 32-bit or 64-bit Linux systems. -The executables for 32-bit are `arm-linux-gnueabihf-gcc` and `arm-linux-gnueabihf-g++`. +The executables for 32-bit are `arm-linux-gnueabihf-gcc` and `arm-linux-gnueabihf-g++`. The executables for 64-bit are `aarch64-linux-gnu-gcc` and `aarch64-linux-gnu-g++`. @@ -90,7 +87,7 @@ brew install arm-none-eabi-gcc ## Setting up product license {#license} -GCC is open source and freely available for use. +GCC is open source and freely available for use. 
## Get started {#start} diff --git a/content/install-guides/gcc/native.md b/content/install-guides/gcc/native.md index 18892ccd5a..3ba51a6cf5 100644 --- a/content/install-guides/gcc/native.md +++ b/content/install-guides/gcc/native.md @@ -12,9 +12,6 @@ test_images: - fedora:latest test_link: null test_maintenance: true -test_status: -- passed -- passed title: Native compiler tool_install: false weight: 2 @@ -68,16 +65,16 @@ To install the most common development tools use the commands below. If the mach ```bash { target="fedora:latest" } sudo dnf update -y -sudo dnf groupinstall 'Development Tools' -y +sudo dnf group install "development-tools" -y ``` If `sudo` is not available become _root_ and omit the `sudo`. ```console dnf update -y -dnf groupinstall 'Development Tools' -y +sudo dnf group install "development-tools" -y ``` ## Setting up product license -GCC is open source and freely available for use. +GCC is open source and freely available for use. ## Get started {#start} diff --git a/content/install-guides/gfortran.md b/content/install-guides/gfortran.md index c3f9f63e8e..d37613e292 100644 --- a/content/install-guides/gfortran.md +++ b/content/install-guides/gfortran.md @@ -18,8 +18,6 @@ test_images: - fedora:latest test_link: null test_maintenance: true -test_status: -- passed title: GFortran tool_install: true weight: 1 @@ -47,7 +45,7 @@ aarch64 If you see a different result, you are not using an Arm computer running 64-bit Linux. -## Download +## Download The Linux package manager downloads the required files so there are no special instructions. @@ -64,7 +62,7 @@ sudo apt install gfortran -y ### Installing on Red Hat / Fedora / Amazon Linux -These Linux distributions use `yum` as the package manager. +These Linux distributions use `yum` as the package manager. To install the most common development tools use the commands below. If the machine has `sudo` you can use it. diff --git a/content/install-guides/kubectl.md b/content/install-guides/kubectl.md index 2d9a7b33b8..670a72fe12 100644 --- a/content/install-guides/kubectl.md +++ b/content/install-guides/kubectl.md @@ -17,8 +17,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: Kubectl tool_install: true weight: 1 @@ -30,7 +28,7 @@ The Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference ## What should I consider before installing kubectl for Ubuntu on Arm? -[General installation information](https://kubernetes.io/docs/tasks/tools/) is available which covers all supported operating systems, but it doesn't talk about Arm. +[General installation information](https://kubernetes.io/docs/tasks/tools/) is available which covers all supported operating systems, but it doesn't talk about Arm. This article provides a quick solution to install `kubectl` for Ubuntu on Arm. @@ -48,7 +46,7 @@ If you see a different result, you are not using an Arm computer running 64-bit ## How do I download and Install kubectl for Ubuntu on Arm? -The easiest way to install `kubectl` for Ubuntu on Arm is to use curl and copy the executable to a common location. +The easiest way to install `kubectl` for Ubuntu on Arm is to use curl and copy the executable to a common location. To install curl, for example on ubuntu: @@ -62,7 +60,7 @@ Download and install the latest version of `kubctl`. 
There is just 1 executable curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" ``` -If you have sudo or root access, install the executable in a common location for all users. +If you have sudo or root access, install the executable in a common location for all users. ```bash { target="ubuntu:latest" } sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl diff --git a/content/install-guides/nerdctl.md b/content/install-guides/nerdctl.md new file mode 100644 index 0000000000..304217e5e5 --- /dev/null +++ b/content/install-guides/nerdctl.md @@ -0,0 +1,309 @@ +--- +title: Nerdctl +author: Jason Andrews + +minutes_to_complete: 10 + +official_docs: https://github.com/containerd/nerdctl/blob/main/docs/command-reference.md + +additional_search_terms: +- container +- containerd +- docker +- Linux + +test_images: +- ubuntu:latest +test_maintenance: false + +tool_install: true +layout: installtoolsall +multi_install: false +multitool_install_part: false +weight: 1 +--- + +[Nerdctl](https://github.com/containerd/nerdctl) is an open-source command-line interface (CLI) designed to be compatible with the Docker CLI, but specifically for interacting with [containerd](https://containerd.io/). It provides a familiar user experience for developers who are familiar with Docker, while leveraging the capabilities of containerd as the underlying container runtime. + +Using `containerd` and `nerdctl` provides similar functionality to Docker but with a smaller memory footprint, making it ideal for IoT and edge solutions, especially on Arm devices that balance energy efficiency and performance. + +Nerdctl also supports running containers in rootless mode, which helps enhance security by not requiring elevated privileges. Rootless mode is not covered below but you can refer to the [documentation](https://rootlesscontaine.rs/getting-started/containerd/) for information about how to run `containerd-rootless-setuptool.sh install`. + +This guide explains how to install and use `containerd` and `nerdctl` on Arm Linux, and how to run commands with `sudo`. + +## Before you begin + +This guide assumes you are using a Debian-based Arm Linux distribution, including Ubuntu and Raspberry Pi OS. You can use a local Arm Linux computer or an Arm instance in the cloud. + +Confirm you are using an Arm machine by running: + +```bash +uname -m +``` + +The output should be: + +```output +aarch64 +``` + +Ensure `wget` and `tar` are installed. 
Most distributions will include them, but if not, run: + +```bash +sudo apt-get update +sudo apt-get install -y wget tar +``` + +## Install containerd + +Install the `containerd` runtime: + +```bash +sudo apt-get install containerd -y +``` + +Start and enable the `containerd` service: + +```bash +sudo systemctl start containerd +sudo systemctl enable containerd +``` + +Confirm the service is running: + +```console +systemctl status containerd.service +``` + +When `containerd` is running, the output is similar to: + +```output +● containerd.service - containerd container runtime + Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; preset: enabled) + Active: active (running) since Tue 2025-04-22 20:12:03 UTC; 2min 20s ago + Docs: https://containerd.io + Main PID: 8428 (containerd) + Tasks: 9 + Memory: 13.0M (peak: 13.7M) + CPU: 401ms + CGroup: /system.slice/containerd.service + └─8428 /usr/bin/containerd +``` + +## Install nerdctl and CNI plugins + +Install `nerdctl` and the necessary CNI (Container Network Interface) plugins: + +```bash +NERDCTL_VERSION=$(curl -s https://api.github.com/repos/containerd/nerdctl/releases/latest | grep tag_name | cut -d '"' -f 4 | sed 's/v//') +wget https://github.com/containerd/nerdctl/releases/download/v${NERDCTL_VERSION}/nerdctl-${NERDCTL_VERSION}-linux-arm64.tar.gz +sudo tar -xzvf nerdctl-${NERDCTL_VERSION}-linux-arm64.tar.gz -C /usr/local/bin +``` + +Install the CNI plugins: + +```bash +CNI_VERSION=$(curl -s https://api.github.com/repos/containernetworking/plugins/releases/latest | grep tag_name | cut -d '"' -f 4 | sed 's/v//') +wget https://github.com/containernetworking/plugins/releases/download/v${CNI_VERSION}/cni-plugins-linux-arm64-v${CNI_VERSION}.tgz +sudo mkdir -p /opt/cni/bin +sudo tar -xzvf cni-plugins-linux-arm64-v${CNI_VERSION}.tgz -C /opt/cni/bin +``` + +Clean up the downloaded files: + +```bash +rm nerdctl-${NERDCTL_VERSION}-linux-arm64.tar.gz cni-plugins-linux-arm64-v${CNI_VERSION}.tgz +``` + +{{% notice Note %}} +The commands above attempt to fetch the latest versions automatically. If required, you can replace `${NERDCTL_VERSION}` and `${CNI_VERSION}` with specific versions. +{{% /notice %}} + +## Install BuildKit + +If you want to build container images with `nerdctl`, you need to install [BuildKit](https://github.com/moby/buildkit). + +If you only plan to run container images (not build them), you can skip this step. 
+ +```bash +BUILDKIT_VERSION=$(curl -s https://api.github.com/repos/moby/buildkit/releases/latest | grep tag_name | cut -d '"' -f 4 | sed 's/v//') +wget https://github.com/moby/buildkit/releases/download/v${BUILDKIT_VERSION}/buildkit-v${BUILDKIT_VERSION}.linux-arm64.tar.gz +sudo tar -xzvf buildkit-v${BUILDKIT_VERSION}.linux-arm64.tar.gz -C /usr +rm buildkit-v${BUILDKIT_VERSION}.linux-arm64.tar.gz +``` + +Create a systemd service for BuildKit: + +```bash +sudo tee /etc/systemd/system/buildkit.service > /dev/null << EOF +[Unit] +Description=BuildKit +Documentation=https://github.com/moby/buildkit + +[Service] +ExecStart=/usr/bin/buildkitd --oci-worker=false --containerd-worker=true + +[Install] +WantedBy=multi-user.target +EOF +``` + +Start and enable the BuildKit service: + +```bash +sudo systemctl daemon-reload +sudo systemctl start buildkit +sudo systemctl enable buildkit +``` + +Verify BuildKit is running: + +```console +sudo systemctl status buildkit +``` + +When running, the output is similar to: + +```output +ubuntu@m1u:~$ sudo systemctl status buildkit +● buildkit.service - BuildKit + Loaded: loaded (/etc/systemd/system/buildkit.service; enabled; preset: enabled) + Active: active (running) since Tue 2025-04-22 22:55:39 CDT; 18min ago + Docs: https://github.com/moby/buildkit + Main PID: 22280 (buildkitd) + Tasks: 10 (limit: 4598) + Memory: 14.6M (peak: 42.0M) + CPU: 1.144s + CGroup: /system.slice/buildkit.service + └─22280 /usr/bin/buildkitd --oci-worker=false --containerd-worker=true +``` + +Check that buildctl can communicate with the daemon: + +```console +sudo buildctl debug workers +``` + +If BuildKit is properly installed, you should see output similar to: + +```output +ID PLATFORMS +jz1h9gb0xq39ob6868cr3ev6r linux/arm64 +``` + +## Verify the installation + +You can check the `nerdctl` version: + +```console +sudo nerdctl version +``` + +Test your installation by running a simple container that prints the processor architecture: + +```console +sudo nerdctl run --name uname armswdev/uname +``` + +Wait a few seconds for the container to start. It will print the system architecture: + +```output +Architecture is aarch64 +``` + +Clean up the test container: + +```console +sudo nerdctl rm uname +``` + +To build a container image, save the following lines to a file named `Dockerfile`. + +```console +FROM ubuntu:latest +CMD echo -n "Architecture is " && uname -m +``` + +Build the container image: + +```console +sudo nerdctl build -t uname -f Dockerfile . +``` + +Run the new container image: + +```console +sudo nerdctl run uname +``` + +The output is the architecture: + +```output +Architecture is aarch64 +``` + +## Basic nerdctl commands + +Here are some common commands to get you started: + +List running containers: + +```console +sudo nerdctl ps +``` + +List all containers (including stopped): + +```console +sudo nerdctl ps -a +``` + +List images: + +```console +sudo nerdctl images +``` + +Pull an image: + +```console +sudo nerdctl pull : +``` + +Build an image from Dockerfile in current directory: + +```console +sudo nerdctl build -t : . +``` + +Remove an image: + +```console +sudo nerdctl rmi : +``` + +Stop a container: + +```console +sudo nerdctl stop +``` + +Remove a container: + +```console +sudo nerdctl rm +``` + +View container logs: + +```console +sudo nerdctl logs +``` + +Execute a command in a running container: + +```console +sudo nerdctl exec -it +``` + +You are now ready to use `nerdctl` and `containerd` to manage containers on Arm Linux. 
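To see the basic `nerdctl` commands working together, the short sequence below pulls a public image, runs it detached with a port mapping, checks its logs, and then cleans up. It is a minimal sketch: the `nginx:latest` image, the container name `web`, and the `8080:80` port mapping are illustrative choices, and it assumes the machine can reach a public registry.

```console
# Pull an image and run it detached with a port mapping
sudo nerdctl pull nginx:latest
sudo nerdctl run -d --name web -p 8080:80 nginx:latest

# Confirm the container is running and view its logs
sudo nerdctl ps
sudo nerdctl logs web

# Stop and remove the container, then remove the image
sudo nerdctl stop web
sudo nerdctl rm web
sudo nerdctl rmi nginx:latest
```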
diff --git a/content/install-guides/oci-cli.md b/content/install-guides/oci-cli.md index 254f3dc9ca..ecf4ed435e 100644 --- a/content/install-guides/oci-cli.md +++ b/content/install-guides/oci-cli.md @@ -2,7 +2,7 @@ additional_search_terms: - cloud - OCI -- Oracle +- Oracle - infrastructure layout: installtoolsall @@ -14,21 +14,19 @@ official_docs: https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliinstall test_images: - ubuntu:latest test_maintenance: true -test_status: -- passed title: Oracle Cloud Infrastructure (OCI) CLI tool_install: true weight: 1 --- -The Oracle Cloud Infrastructure Command Line Interface (OCI CLI) is a cross-platform command-line tool that can be installed locally on development computers. OCI CLI is used to connect to OCI and execute administrative commands on OCI resources. +The Oracle Cloud Infrastructure Command Line Interface (OCI CLI) is a cross-platform command-line tool that can be installed locally on development computers. OCI CLI is used to connect to OCI and execute administrative commands on OCI resources. -It is available for a variety of operating systems and Linux distributions and has multiple ways to install it. +It is available for a variety of operating systems and Linux distributions and has multiple ways to install it. ## Before you begin -[General installation information](https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliinstall.htm#InstallingCLI__linux_and_unix) is available which covers all supported Linux distributions. - +[General installation information](https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliinstall.htm#InstallingCLI__linux_and_unix) is available which covers all supported Linux distributions. + This install guide provides a quick solution to install OCI CLI for Ubuntu on Arm. Confirm you are using an Arm machine by running: @@ -47,46 +45,40 @@ If you see a different result, you are not using an Arm computer running 64-bit ## Download and Install -The easiest way to install OCI CLI for Ubuntu on Arm is to use the install script. +The easiest way to install OCI CLI for Ubuntu on Arm is to use the install script. You can run an automated install with default values or an interactive install to change the default values. -To run an an automated install with default values run: +To run an an automated install with default values run: ```bash { target="ubuntu:latest" } curl -o install.sh https://raw.githubusercontent.com/oracle/oci-cli/master/scripts/install/install.sh -bash ./install.sh --accept-all-defaults +bash ./install.sh --accept-all-defaults --update-path-and-enable-tab-completion --rc-file-path $HOME/.bashrc ``` -To run an interactive install that allows you to change default values run: +To run an interactive install that allows you to change default values run: ```console bash -c "$(curl -L https://raw.githubusercontent.com/oracle/oci-cli/master/scripts/install/install.sh)" ``` -Run the command below to add the `oci` command to your search path: - -```bash { target="ubuntu:latest" } -source $HOME/.bashrc -``` - Verify OCI CLI is installed using the `--version` option: -```bash { target="ubuntu:latest", env_source="~/.bashrc" } +```console oci --version ``` -Configure OCI CLI using the `setup config` command: +Configure OCI CLI using the `setup config` command: ```console oci setup config ``` -To complete the setup you will need your OCID and API key. +To complete the setup you will need your OCID and API key. Log in to the OCI web console to access your OCID and API key. 
-Open the Profile menu and click Tenancy: to locate your OCID. +Open the Profile menu and click Tenancy: to locate your OCID. ![oci1 #center](https://github.com/ArmDeveloperEcosystem/arm-learning-paths/assets/40816837/7d5faf0a-2b62-41a8-ac1c-66e11aa01e5d) diff --git a/content/install-guides/pytorch.md b/content/install-guides/pytorch.md index f4565a14af..a4bc339996 100644 --- a/content/install-guides/pytorch.md +++ b/content/install-guides/pytorch.md @@ -14,8 +14,6 @@ test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed title: PyTorch tool_install: true weight: 1 diff --git a/content/install-guides/streamline-cli.md b/content/install-guides/streamline-cli.md index 300eb55ba1..9b56af96d6 100644 --- a/content/install-guides/streamline-cli.md +++ b/content/install-guides/streamline-cli.md @@ -74,7 +74,7 @@ If you are using the `workflow_topdown_basic option`, ensure that your applicati ## Using Python scripts -The Python scripts provided with Streamline CLI tools require Python 3.8 or later, and depend on several third-party modules. We recommend creating a Python virtual environment containing these modules to run the tools. +The Python scripts provided with Streamline CLI tools require Python 3.8 or later, and depend on several third-party modules. It is recommended to create a Python virtual environment containing these modules to run the tools. Create a virtual environment: @@ -141,7 +141,7 @@ https://artifacts.tools.arm.com/arm-performance-studio/Streamline_CLI_Tools/ ## Applying the kernel patch -For best results, we provide a Linux kernel patch that modifies the behavior of Linux perf to improve support for capturing function-attributed top-down metrics on Arm systems. This patch provides two new capabilities: +For best results, a Linux kernel patch is available that modifies the behavior of Linux perf to improve support for capturing function-attributed top-down metrics on Arm systems. This patch provides two new capabilities: * It allows a new thread to inherit the perf counter group configuration of its parent. * It decouples the perf event-based sampling window size from the overall sample rate. This allows strobed mark-space sampling patterns where the tool can capture a small window without using a high sample rate. diff --git a/content/install-guides/terraform.md b/content/install-guides/terraform.md index 6ee6d385c3..7f8cee937a 100644 --- a/content/install-guides/terraform.md +++ b/content/install-guides/terraform.md @@ -16,8 +16,6 @@ test_images: - ubuntu:latest test_link: false test_maintenance: true -test_status: -- passed title: Terraform tool_install: true weight: 1 diff --git a/content/install-guides/wperf.md b/content/install-guides/wperf.md index ff9694ed96..aa36586b37 100644 --- a/content/install-guides/wperf.md +++ b/content/install-guides/wperf.md @@ -39,7 +39,7 @@ WindowsPerf consists of a kernel-mode driver and a user-space command-line tool. You cannot use WindowsPerf on virtual machines, such as cloud instances. {{% /notice %}} -## Using winget (Recommended) +## Using winget ### Install @@ -79,70 +79,11 @@ Starting package uninstall... Successfully uninstalled ``` -## Visual Studio and the Windows Driver Kit (WDK) - -WindowsPerf relies on `dll` files installed with Visual Studio, from the Community Edition or higher and, optionally, installers from the Windows Driver Kit extension. 
- -For information about the WDK installation process, see [Download the Windows Driver Kit (WDK)](https://learn.microsoft.com/en-us/windows-hardware/drivers/download-the-wdk). - -See also the [Visual Studio for Windows on Arm install guide](/install-guides/vs-woa/). - -## Download WindowsPerf - -You can download the latest release package, `windowsperf-bin-.zip` from the Linaro GitLab repository: -```url -https://gitlab.com/Linaro/WindowsPerf/windowsperf/-/releases -``` - -To download directly from command prompt, use: - -```console -mkdir windowsperf-bin-4.0.0 -cd windowsperf-bin-4.0.0 -curl -L -O https://gitlab.com/api/v4/projects/40381146/packages/generic/windowsperf/4.0.0/windowsperf-bin-4.0.0.zip -``` - -Unzip the package: - -```console -tar -xmf windowsperf-bin-4.0.0.zip -``` - -## Install wperf driver - -You can install the kernel driver using the supplied `wperf-devgen` installer. - -The [wperf-devgen](https://gitlab.com/Linaro/WindowsPerf/windowsperf/-/tree/main/wperf-devgen/README.md) tool has been designated as the preferred installer and uninstaller for the WindowsPerf Kernel Driver in the latest release. This tool offers a simple process for managing the installation and removal of the driver. - {{% notice Note%}} -You must install the driver as `Administrator`. +WinPerf is an open-source project. If you would like to develop WindowsPerf yourself, you may also need to install the Windows Driver Kit (WDK). Please refer to this link for more details. +https://learn.microsoft.com/en-us/windows-hardware/drivers/wdk-release-notes {{% /notice %}} -Open a **Windows Command Prompt** terminal with **Run as administrator** selected. - -Make sure you are in the `windowsperf-bin-` directory: - -```command -cd windowsperf-bin-4.0.0 -``` - -### Install with wperf-devgen - -Navigate to the `wperf-driver` folder and run the installer: - -```command -cd wperf-driver -wperf-devgen install -``` - -The output should be similar to: - -```output -Executing command: install. -Install requested. -Device installed successfully -``` - ## Verify install You can check everything is working by running the `wperf` executable. @@ -154,7 +95,6 @@ Once you have installed the driver, you can use `wperf` without `Administrator` For example: ```command -cd ..\wperf wperf --version ``` @@ -167,30 +107,7 @@ You see output similar to: wperf-driver 4.0.0 b18197bd +etw-drv ``` -## Uninstall wperf driver - -You can uninstall (or *remove*) the kernel driver using supplied [wperf-devgen](#devgen_uninstall) uninstaller. -{{% notice Note%}} -You must uninstall the driver as `Administrator`. -{{% /notice %}} - -### Uninstall with wperf-devgen {#devgen_uninstall} - -```command -cd windowsperf-bin-4.0.0\wperf-driver -wperf-devgen uninstall -``` - -The output is similar to: - -```console -Executing command: uninstall. -Uninstall requested. -Root\WPERFDRIVER -Device found -Device uninstalled successfully -``` ## Install WindowsPerf Virtual Studio Extension (optional) {#vs2022} diff --git a/content/learning-paths/cross-platform/adler32/_index.md b/content/learning-paths/cross-platform/adler32/_index.md new file mode 100644 index 0000000000..f038fe9c70 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/_index.md @@ -0,0 +1,56 @@ +--- +title: Write NEON intrinsics using GitHub Copilot to improve Adler32 performance + +minutes_to_complete: 45 + +who_is_this_for: This is an introductory topic for C/C++ developers who are interested in using GitHub Copilot to improve performance using NEON intrinsics. 
+ +learning_objectives: + - Use GitHub Copilot to write NEON intrinsics that accelerate the Adler32 checksum algorithm. + +prerequisites: + - An Arm computer running Linux with the GNU compiler (gcc) installed. + - Visual Studio Code with the GitHub Copilot extension installed. + +author: Jason Andrews + +### Tags +skilllevels: Introductory +subjects: Performance and Architecture +armips: + - Neoverse + - Cortex-A +tools_software_languages: + - GCC + - Runbook + +operatingsystems: + - Linux +shared_path: true +shared_between: + - servers-and-cloud-computing + - laptops-and-desktops + - mobile-graphics-and-gaming + + +further_reading: + - resource: + title: Arm C Language Extensions + link: https://arm-software.github.io/acle/ + type: Documentation + - resource: + title: Adler-32 Checksum Algorithm + link: https://en.wikipedia.org/wiki/Adler-32 + type: Article + - resource: + title: NEON Programming Quick Reference + link: https://developer.arm.com/documentation/den0018/a + type: Documentation + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/_next-steps.md b/content/learning-paths/cross-platform/adler32/_next-steps.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/_next-steps.md rename to content/learning-paths/cross-platform/adler32/_next-steps.md diff --git a/content/learning-paths/cross-platform/adler32/about-2.md b/content/learning-paths/cross-platform/adler32/about-2.md new file mode 100644 index 0000000000..91d7202849 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/about-2.md @@ -0,0 +1,52 @@ +--- +title: About NEON and Adler32 +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Overview + +In computing, optimizing performance is crucial for applications that process large amounts of data. This Learning Path guides you through implementing and optimizing the Adler32 checksum algorithm using Arm advanced SIMD (Single Instruction, Multiple Data) instructions. You'll learn how to leverage GitHub Copilot to simplify the development process while achieving significant performance improvements. + +## Simplifying Arm NEON Development with GitHub Copilot + +Developers recognize that Arm NEON SIMD instructions can significantly boost performance for computationally intensive applications, particularly in areas like image processing, audio/video codecs, and machine learning. However, writing NEON intrinsics directly requires specialized knowledge of the instruction set, careful consideration of data alignment, and complex vector operations that can be error-prone and time-consuming. Many developers avoid implementing these optimizations due to the steep learning curve and development overhead. + +The good news is that AI developer tools such as GitHub Copilot make working with NEON intrinsics much more accessible. 
By providing intelligent code suggestions, automated vectorization hints, and contextual examples tailored to your specific use case, GitHub Copilot can help bridge the knowledge gap and accelerate the development of NEON-optimized code. This allows developers to harness the full performance potential of Arm processors - without the usual complexity and overhead. + +You can demonstrate writing NEON intrinsics with GitHub Copilot by creating a full project from scratch and comparing the C implementation to a NEON-optimized version. + +While you may not create complete projects from scratch - and you shouldn't blindly trust the generated code - it's helpful to see what's possible using an example so you can apply the principles to your own projects. + +## Accelerating Adler32 with Arm NEON + +This project demonstrates how to accelerate Adler32 checksum calculations using Arm NEON instructions. + +### What is Arm NEON? + +Arm NEON is an advanced SIMD architecture extension for Arm processors. It provides a set of instructions that can process multiple data elements in parallel using specialized vector registers. NEON technology enables developers to accelerate computationally intensive algorithms by performing the same operation on multiple data points simultaneously, rather than processing them one at a time. This parallelism is particularly valuable for multimedia processing, scientific calculations, and cryptographic operations where the same operation needs to be applied to large datasets. + +## What Is the Adler32 Algorithm? + +Mark Adler developed the Adler32 checksum algorithm in 1995. It's used in the zlib compression library and is faster than CRC32 but provides less reliable error detection. + +The algorithm works by calculating two 16-bit sums: + +- s1: A simple sum of all bytes. +- s2: A sum of all s1 values after each byte. +- The final checksum is `(s2 << 16) | s1`. + +## What You'll Build + +This project walks you through building the following components using GitHub Copilot: + +- A standard C implementation of Adler32. +- A test program to validate outputs for various input sizes. +- A Makefile to build and run the program. +- Performance measurement code to record how long the algorithm takes. +- A NEON-optimized version of Adler32. +- A performance comparison table for both implementations. + +Continue to the next section to start creating the project. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/build-6.md b/content/learning-paths/cross-platform/adler32/build-6.md new file mode 100644 index 0000000000..5639b7d00d --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/build-6.md @@ -0,0 +1,62 @@ +--- +title: Build and run the test program +weight: 6 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Build and Run the Test Program? + +You now have all the required files to test the Adler32 algorithm: +- A C implementation of the Adler32 function. +- A test program to verify correctness and measure performance. +- A Makefile to build and run the project. + +Paste the following prompt into your GitHub Copilot Agent session: + +```console +Use the Makefile to build the project and run to make sure the checksum results are correct for all data sizes. +``` + +The output is: + +```output + +GitHub Copilot: I'll help you build and run the Adler-32 checksum project to verify the results. Let me do this step-by-step. + +First, let's build the project using the provided Makefile:Great! 
The build completed successfully without any errors or warnings. The Makefile compiled both source files (`adler32-simple.c` and adler32-test.c) and linked them into the executable adler32-test. + +Now let's run the tests to verify that the Adler-32 checksum implementation works correctly for all data sizes:I ran the Adler-32 checksum project, and here are the results: + +The build completed successfully without any errors or warnings. The Makefile correctly compiled both source files (`adler32-simple.c` and adler32-test.c) and linked them into the executable adler32-test. + +When running the tests, I got the following results: + +### Verification Tests +All 6 verification tests passed successfully: +1. Empty string: PASS +2. "a": PASS +3. "abc": PASS +4. "message digest": PASS +5. "abcdefghijklmnopqrstuvwxyz": PASS +6. "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789": PASS + +### Incremental API Test +The incremental API test also passed, showing that both direct and incremental calculation methods produce the same checksum (0x3d094d1a). + +### Performance Tests +The implementation was benchmarked with different data sizes: +- 1 KB: 195.31 MB/s +- 10 KB: 128.50 MB/s +- 100 KB: 261.11 MB/s +- 1 MB: 297.80 MB/s +- 10 MB: 298.17 MB/s + +The results confirm that your Adler-32 checksum implementation is correct for all data sizes and performs efficiently. The optimizations for the Neoverse N1 architecture specified in the Makefile appear to be working properly, with throughput reaching nearly 300 MB/s for larger data sizes. + +``` + +The results from GitHub Copilot confirm that the Adler32 checksum calculations are correct and provide initial performance benchmarks. These results offer a solid baseline, but a meaningful comparison requires an optimized implementation. + +In the next section, you’ll implement Adler32 using NEON intrinsics and compare its performance against this baseline. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/makefile-5.md b/content/learning-paths/cross-platform/adler32/makefile-5.md new file mode 100644 index 0000000000..5e3b72b230 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/makefile-5.md @@ -0,0 +1,114 @@ +--- +title: Create a Makefile +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Create a Makefile to Build and Run the Test Program? + +Paste the following prompt into GitHub Copilot. It tells Copilot to generate a Makefile that uses `gcc` and targets the Neoverse N1 processor for optimized performance. + +```console +Read the .c files in my project and +create a Makefile to build and run the adler32-test program using gcc. +Select the best compiler flags for Neoverse N1 to optimize for performance. +``` + +The output is: + +```output + +GitHub Copilot: I'll create a Makefile for your Adler-32 project that's optimized for the Neoverse N1 architecture. First, let's look at the project structure and understand the code files better.I see that the file search didn't return results. Let's try again with a different approach.I can see the project contains three files: adler32-simple.c, adler32-simple.h, and adler32-test.c. Let's examine the code in adler32-simple.c to understand the implementation before creating the Makefile.I've examined the code files in your project: + +1. adler32-simple.c - Contains the implementation of the Adler-32 checksum algorithm with functions for initialization, update, and finalization +2. 
adler32-simple.h - Presumably contains the function declarations +3. adler32-test.c - Contains test code for verifying and benchmarking the Adler-32 implementation + +Now I'll create a Makefile optimized for performance on the Neoverse N1 architecture. The Neoverse N1 is an ARM architecture that benefits from specific compiler optimizations. + +# Makefile for Adler-32 checksum implementation +# Optimized for Neoverse N1 architecture + +# Compiler and flags +CC = gcc +# Neoverse N1 specific optimization flags: +# -march=armv8.2-a+crypto: Target ARMv8.2-A architecture with crypto extensions (Neoverse N1) +# -mcpu=neoverse-n1: Specifically optimize for Neoverse N1 +# -O3: Highest standard optimization level +# -flto: Link-time optimization +# -fomit-frame-pointer: Don't keep the frame pointer in a register for functions that don't need one +# -ftree-vectorize: Vectorize loops when possible +# -ffast-math: Enable fast math optimizations +# Additional useful flags: +# -Wall -Wextra: Enable warnings +# -g: Include debugging symbols +CFLAGS = -march=armv8.2-a+crypto -mcpu=neoverse-n1 -O3 -flto -fomit-frame-pointer -ftree-vectorize -ffast-math -Wall -Wextra -g + +# Source files +SRCS = adler32-simple.c adler32-test.c +OBJS = $(SRCS:.c=.o) +TARGET = adler32-test + +# Default target +all: $(TARGET) + +# Link the executable +$(TARGET): $(OBJS) + $(CC) $(CFLAGS) -o $@ $^ + +# Compile source files +%.o: %.c + $(CC) $(CFLAGS) -c $< -o $@ + +# Run the tests +run: $(TARGET) + ./$(TARGET) + +# Clean generated files +clean: + rm -f $(OBJS) $(TARGET) + +# Rebuild the project +rebuild: clean all + +.PHONY: all run clean rebuild + + +Made changes. + +I've created a Makefile for your Adler-32 project, optimized specifically for the Neoverse N1 architecture. Let me highlight some key features of this Makefile: + +### Optimization Flags for Neoverse N1 + +The Makefile includes several compiler flags tailored for maximum performance on the Neoverse N1: + +- `-march=armv8.2-a+crypto`: Targets ARMv8.2-A architecture with crypto extensions +- `-mcpu=neoverse-n1`: Specifically optimizes for the Neoverse N1 processor +- `-O3`: Enables the highest standard optimization level +- `-flto`: Enables link-time optimization for whole-program optimization +- `-fomit-frame-pointer`: Frees up a register by not keeping frame pointers when not needed +- `-ftree-vectorize`: Automatically vectorizes loops to leverage SIMD instructions +- `-ffast-math`: Enables optimizations that might affect floating-point precision but improve performance + +### Building and Testing + +To use the Makefile, you can run the following commands: + +1. To build the project: + make + +2. To run the tests after building: + make run + +3. To clean the build files: + make clean + +4. To rebuild everything from scratch: + make rebuild + +The Makefile is designed to be simple but effective, automatically handling dependencies and providing different targets for common development tasks. +``` + +You are now ready to use the Makefile to build and run the test application. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/more-11.md b/content/learning-paths/cross-platform/adler32/more-11.md new file mode 100644 index 0000000000..446a2ea136 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/more-11.md @@ -0,0 +1,18 @@ +--- +title: Other ideas for GitHub Copilot +weight: 11 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## What else can I do with GitHub Copilot on this project? 
+ +GitHub Copilot can help you explore additional performance and optimization ideas: + +- Test different compiler flags using Agent mode to automate iteration and identify the best combinations. +- Add Clang support to your Makefile and compare performance against GCC; performance can differ significantly depending on your code structure. +- Generate a wider range of data sizes and random patterns to stress-test functionality and measure performance under varied conditions. +- Explore alternative algorithm structures that rely on compiler autovectorization instead of NEON intrinsics; you might discover better performance simply by restructuring the C code. + +AI tools won’t always generate high-performance code out of the box, but they can rapidly accelerate your experimentation and learning, especially in new areas of programming like NEON-based performance optimization. diff --git a/content/learning-paths/cross-platform/adler32/neon-7.md b/content/learning-paths/cross-platform/adler32/neon-7.md new file mode 100644 index 0000000000..cc4c9ee3cf --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/neon-7.md @@ -0,0 +1,558 @@ +--- +title: Create a NEON version of Adler32 +weight: 7 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Create a NEON Version of Adler32 To Improve Performance? + +Writing NEON intrinsics is an advanced programming topic that requires time and effort to learn and apply. GitHub Copilot is a good way to get started with NEON, both to write new code and to debug issues during development. + +You can instruct GitHub Copilot to create a NEON-optimized version of Adler32 using the prompt below: + +```console +Add a second implementation of adler32 that processes data in blocks and uses Arm NEON intrinsics to improve performance. +Create it in the file adler32-neon.c +For inputs less than 16 bytes use a standard implementation. +Update the Makefile and other files to add this new version and compare performance. +Print out the speedup from the original version. +Make sure the checksum values are the same for both versions. +``` + +The output is: + +```output + +GitHub Copilot: I'll help you create a NEON-optimized version of adler32 and update the project to compare performance with the original implementation. Let's start by examining the existing code structure to understand the current implementation.Great, I've found the header file for the simple Adler-32 implementation. Now let's look at the implementation file to understand how it works.I've examined the existing code for the Adler32 implementation. Now let's check the test file to understand how to properly integrate our new implementation:I've analyzed your Adler32 implementation and test code. Now I'll create a NEON-optimized version that processes data in blocks, update the Makefile, and compare the performance between both implementations.
+ +Let's first create the NEON implementation file: + +/* + * adler32-neon.h - Header file for NEON-optimized Adler-32 checksum implementation + */ + +#ifndef ADLER32_NEON_H +#define ADLER32_NEON_H + +#include +#include + +/** + * Calculate Adler-32 checksum for a data buffer using NEON optimization + * + * @param data Pointer to the data buffer + * @param length Length of the data in bytes + * @param initial Initial Adler-32 value (typically 1) + * + * @return The calculated Adler-32 checksum + */ +uint32_t adler32_neon(const unsigned char *data, size_t length, uint32_t initial); + +/** + * Initialize Adler-32 checksum calculation + * + * @return The initial Adler-32 value (1) + */ +uint32_t adler32_neon_init(void); + +/** + * Update Adler-32 checksum with new data using NEON optimization + * + * @param adler Current Adler-32 value + * @param data Pointer to the data buffer + * @param length Length of the data in bytes + * + * @return The updated Adler-32 value + */ +uint32_t adler32_neon_update(uint32_t adler, const unsigned char *data, size_t length); + +/** + * Finalize Adler-32 calculation + * + * @param adler Current Adler-32 value + * + * @return The final Adler-32 checksum + */ +uint32_t adler32_neon_final(uint32_t adler); + +#endif /* ADLER32_NEON_H */ + + +Made changes. + +Now I'll create the implementation file for the NEON-optimized Adler32: + +/* + * adler32-neon.c - NEON-optimized implementation of Adler-32 checksum algorithm + * + * This implementation uses ARM NEON SIMD instructions to process multiple bytes in parallel + * for improved performance on Neoverse N1 and other ARM platforms with NEON support. + */ + +#include +#include +#include +#include "adler32-neon.h" + +/* Adler-32 parameters */ +#define MOD_ADLER 65521 /* Largest prime number less than 2^16 */ +#define BLOCK_SIZE 16 /* Process 16 bytes at once with NEON */ + +/* + * Standard implementation of adler32 for small inputs + * Used for inputs less than BLOCK_SIZE + */ +static uint32_t adler32_standard(const unsigned char *data, size_t length, uint32_t initial) +{ + uint32_t a = initial & 0xffff; + uint32_t b = (initial >> 16) & 0xffff; + size_t i; + + /* Process each byte */ + for (i = 0; i < length; i++) { + a = (a + data[i]) % MOD_ADLER; + b = (b + a) % MOD_ADLER; + } + + /* Combine the two 16-bit values */ + return (b << 16) | a; +} + +/* + * adler32_neon - Calculate the Adler-32 checksum of a data buffer using NEON instructions + * @data: Data buffer + * @length: Length of data buffer in bytes + * @initial: Initial Adler-32 value (typically 1) + * + * Returns the calculated Adler-32 checksum + */ +uint32_t adler32_neon(const unsigned char *data, size_t length, uint32_t initial) +{ + uint32_t a = initial & 0xffff; + uint32_t b = (initial >> 16) & 0xffff; + + /* For small inputs, use the standard implementation */ + if (length < BLOCK_SIZE) { + return adler32_standard(data, length, initial); + } + + /* Process blocks of BLOCK_SIZE bytes */ + while (length >= BLOCK_SIZE) { + /* NEON registers to accumulate sums */ + uint16x8_t va = vdupq_n_u16(0); /* 8 parallel accumulators for a */ + uint16x8_t vb = vdupq_n_u16(0); /* 8 parallel accumulators for b */ + + /* + * We'll process 16 bytes at a time: + * - Load 16 bytes into a 16x8 vector + * - Compute partial sums in vectors + */ + uint8x16_t vdata = vld1q_u8(data); + + /* Convert to 16-bit values for accumulation */ + uint16x8_t vdata_low = vmovl_u8(vget_low_u8(vdata)); + uint16x8_t vdata_high = vmovl_u8(vget_high_u8(vdata)); + + /* Accumulate a values: a += 
data[i] */ + va = vaddq_u16(va, vdata_low); + va = vaddq_u16(va, vdata_high); + + /* + * Accumulate b values with proper weighting: + * b += a + data[i] * (BLOCK_SIZE - i) + * + * Since we compute 16 bytes at once, weights are 16,15,...,1 + */ + static const uint16_t weights[16] = {16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}; + uint16x8_t vweights_low = vld1q_u16(weights); + uint16x8_t vweights_high = vld1q_u16(weights + 8); + + uint16x8_t weighted_low = vmulq_u16(vdata_low, vweights_low); + uint16x8_t weighted_high = vmulq_u16(vdata_high, vweights_high); + + vb = vaddq_u16(vb, weighted_low); + vb = vaddq_u16(vb, weighted_high); + + /* Accumulate partial sums into scalar a and b values */ + uint16_t partial_a[8]; + uint16_t partial_b[8]; + vst1q_u16(partial_a, va); + vst1q_u16(partial_b, vb); + + for (size_t i = 0; i < 8; i++) { + a += partial_a[i]; + b += partial_b[i]; + } + + /* Apply modulo to avoid overflow */ + a %= MOD_ADLER; + b = (b + 16 * a) % MOD_ADLER; /* Add BLOCK_SIZE * a to account for sequential weights */ + + /* Move to next block */ + data += BLOCK_SIZE; + length -= BLOCK_SIZE; + } + + /* Process remaining bytes using standard implementation */ + if (length > 0) { + uint32_t adler = (b << 16) | a; + adler = adler32_standard(data, length, adler); + return adler; + } + + /* Combine the two 16-bit values */ + return (b << 16) | a; +} + +/* + * adler32_neon_init - Initialize an Adler-32 checksum + * + * Returns the initial Adler-32 value + */ +uint32_t adler32_neon_init(void) +{ + return 1; +} + +/* + * adler32_neon_update - Update Adler-32 checksum with new data using NEON optimization + * @adler: Current Adler-32 value + * @data: Data buffer + * @length: Length of data buffer in bytes + * + * Returns the updated Adler-32 value + */ +uint32_t adler32_neon_update(uint32_t adler, const unsigned char *data, size_t length) +{ + return adler32_neon(data, length, adler); +} + +/* + * adler32_neon_final - Finalize Adler-32 calculation + * @adler: Current Adler-32 value + * + * Returns the final Adler-32 checksum + */ +uint32_t adler32_neon_final(uint32_t adler) +{ + return adler; +} + + +Made changes. 
+ +Now I'll update the test file to add performance comparisons between the original and NEON implementations: + +/* + * adler32-test.c - Test and benchmark for Adler-32 checksum implementation + */ + +#include +#include +#include +#include +#include +#include "adler32-simple.h" +#include "adler32-neon.h" + +/* Known test vectors for Adler-32 */ +struct test_vector { + const char *data; + uint32_t expected; +}; + +/* Test data sizes in bytes */ +#define KB (1024) +#define MB (1024 * 1024) + +static const struct test_vector test_vectors[] = { + { "", 0x00000001 }, + { "a", 0x00620062 }, + { "abc", 0x024d0127 }, + { "message digest", 0x29750586 }, + { "abcdefghijklmnopqrstuvwxyz", 0x90860b20 }, + { "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 0x8adb150c }, +}; + +/* Structure to hold test data */ +struct test_data { + unsigned char *buffer; + size_t size; + const char *name; +}; + +/* Generate random test data of specified size */ +static unsigned char* generate_test_data(size_t size) { + unsigned char *data = malloc(size); + if (!data) { + fprintf(stderr, "Failed to allocate memory for test data\n"); + exit(EXIT_FAILURE); + } + + for (size_t i = 0; i < size; i++) { + data[i] = rand() & 0xFF; + } + + return data; +} + +/* Performance test result structure */ +struct perf_result { + double time; + double throughput; + uint32_t checksum; +}; + +/* Run performance test on a data buffer using the specified implementation */ +static struct perf_result test_performance_impl(const unsigned char *data, size_t size, + uint32_t (*adler_func)(const unsigned char *, size_t, uint32_t), + uint32_t (*init_func)(void), + const char *impl_name) { + clock_t start, end; + struct perf_result result; + + /* Warm-up run */ + adler_func(data, size, init_func()); + + /* Timed run */ + start = clock(); + result.checksum = adler_func(data, size, init_func()); + end = clock(); + + result.time = ((double) (end - start)) / CLOCKS_PER_SEC; + result.throughput = (double)size / (result.time * MB); + + printf(" %s:\n", impl_name); + printf(" Checksum: 0x%08x\n", result.checksum); + printf(" Time: %.6f seconds\n", result.time); + printf(" Throughput: %.2f MB/s\n", result.throughput); + + return result; +} + +/* Run performance test on a data buffer for both implementations */ +static void test_performance(const unsigned char *data, size_t size, const char *test_name) { + printf("Testing %s (%zu bytes):\n", test_name, size); + + /* Test the simple implementation */ + struct perf_result simple = test_performance_impl(data, size, adler32, adler32_init, "Simple implementation"); + + /* Test the NEON implementation */ + struct perf_result neon = test_performance_impl(data, size, adler32_neon, adler32_neon_init, "NEON implementation"); + + /* Check if checksums match */ + if (simple.checksum != neon.checksum) { + printf(" CHECKSUM MISMATCH: Simple 0x%08x, NEON 0x%08x\n", + simple.checksum, neon.checksum); + } + + /* Calculate speedup */ + double speedup = simple.time / neon.time; + printf(" Speedup: %.2fx\n\n", speedup); +} + +/* Run verification tests with known test vectors */ +static void run_verification_tests(void) { + printf("Running verification tests:\n"); + printf("==========================\n"); + + int passed_simple = 0; + int passed_neon = 0; + int total = sizeof(test_vectors) / sizeof(test_vectors[0]); + + for (size_t i = 0; i < total; i++) { + const struct test_vector *tv = &test_vectors[i]; + uint32_t result_simple = adler32((const unsigned char *)tv->data, strlen(tv->data), adler32_init()); + 
uint32_t result_neon = adler32_neon((const unsigned char *)tv->data, strlen(tv->data), adler32_neon_init()); + + printf("Test %zu: \"%s\"\n", i + 1, tv->data); + printf(" Expected: 0x%08x\n", tv->expected); + printf(" Simple: 0x%08x (%s)\n", result_simple, + (result_simple == tv->expected) ? "PASS" : "FAIL"); + printf(" NEON: 0x%08x (%s)\n", result_neon, + (result_neon == tv->expected) ? "PASS" : "FAIL"); + + if (result_simple == tv->expected) passed_simple++; + if (result_neon == tv->expected) passed_neon++; + + /* Check if both implementations match */ + if (result_simple != result_neon) { + printf(" WARNING: Simple and NEON implementations produce different results!\n"); + } + + printf("\n"); + } + + printf("Verification summary:\n"); + printf(" Simple: %d/%d tests passed\n", passed_simple, total); + printf(" NEON: %d/%d tests passed\n\n", passed_neon, total); +} + +/* Run performance tests with different data sizes */ +static void run_performance_tests(void) { + printf("Running performance tests:\n"); + printf("=========================\n"); + + /* Define test data sizes */ + struct test_data tests[] = { + { NULL, 1 * KB, "1 KB" }, + { NULL, 10 * KB, "10 KB" }, + { NULL, 100 * KB, "100 KB" }, + { NULL, 1 * MB, "1 MB" }, + { NULL, 10 * MB, "10 MB" } + }; + + /* Generate random test data for each size */ + for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + tests[i].buffer = generate_test_data(tests[i].size); + test_performance(tests[i].buffer, tests[i].size, tests[i].name); + } + + /* Free allocated memory */ + for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + free(tests[i].buffer); + } +} + +/* Test the incremental API for both implementations */ +static void test_incremental_api(void) { + printf("Testing incremental API:\n"); + printf("=======================\n"); + + /* Generate test data */ + size_t size = 1 * MB; + unsigned char *data = generate_test_data(size); + + printf("Simple implementation:\n"); + /* Calculate checksum in one go */ + uint32_t direct = adler32(data, size, adler32_init()); + + /* Calculate checksum incrementally */ + uint32_t incremental = adler32_init(); + + /* Process in chunks of 64KB */ + size_t chunk_size = 64 * KB; + for (size_t offset = 0; offset < size; offset += chunk_size) { + size_t remaining = size - offset; + size_t current_chunk = (remaining < chunk_size) ? remaining : chunk_size; + incremental = adler32_update(incremental, data + offset, current_chunk); + } + incremental = adler32_final(incremental); + + /* Compare results */ + printf(" Direct calculation: 0x%08x\n", direct); + printf(" Incremental calculation: 0x%08x\n", incremental); + printf(" Match: %s\n\n", (direct == incremental) ? "YES" : "NO"); + + printf("NEON implementation:\n"); + /* Calculate checksum in one go */ + uint32_t direct_neon = adler32_neon(data, size, adler32_neon_init()); + + /* Calculate checksum incrementally */ + uint32_t incremental_neon = adler32_neon_init(); + + /* Process in chunks of 64KB */ + for (size_t offset = 0; offset < size; offset += chunk_size) { + size_t remaining = size - offset; + size_t current_chunk = (remaining < chunk_size) ? remaining : chunk_size; + incremental_neon = adler32_neon_update(incremental_neon, data + offset, current_chunk); + } + incremental_neon = adler32_neon_final(incremental_neon); + + /* Compare results */ + printf(" Direct calculation: 0x%08x\n", direct_neon); + printf(" Incremental calculation: 0x%08x\n", incremental_neon); + printf(" Match: %s\n\n", (direct_neon == incremental_neon) ? 
"YES" : "NO"); + + /* Compare simple vs NEON */ + printf("Cross implementation comparison:\n"); + printf(" Simple checksum: 0x%08x\n", direct); + printf(" NEON checksum: 0x%08x\n", direct_neon); + printf(" Match: %s\n\n", (direct == direct_neon) ? "YES" : "NO"); + + free(data); +} + +int main(void) { + /* Seed random number generator */ + srand((unsigned int)time(NULL)); + + printf("Adler-32 Test and Benchmark\n"); + printf("===========================\n\n"); + + /* Run verification tests with known test vectors */ + run_verification_tests(); + + /* Test the incremental API */ + test_incremental_api(); + + /* Run performance tests with different data sizes */ + run_performance_tests(); + + return 0; +} + + +Made changes. + +Let's update the Makefile to include the NEON implementation in the build: + +# Makefile for Adler-32 checksum implementation +# Optimized for Neoverse N1 architecture + +# Compiler and flags +CC = gcc +# Neoverse N1 specific optimization flags: +# -march=armv8.2-a+crypto: Target ARMv8.2-A architecture with crypto extensions (Neoverse N1) +# -mcpu=neoverse-n1: Specifically optimize for Neoverse N1 +# -O3: Highest standard optimization level +# -flto: Link-time optimization +# -fomit-frame-pointer: Don't keep the frame pointer in a register for functions that don't need one +# -ftree-vectorize: Vectorize loops when possible +# -ffast-math: Enable fast math optimizations +# Additional useful flags: +# -Wall -Wextra: Enable warnings +# -g: Include debugging symbols +CFLAGS = -march=armv8.2-a+crypto -mcpu=neoverse-n1 -O3 -flto -fomit-frame-pointer -ftree-vectorize -ffast-math -Wall -Wextra -g + +# Source files +SRCS = adler32-simple.c adler32-neon.c adler32-test.c +OBJS = $(SRCS:.c=.o) +TARGET = adler32-test + +# Default target +all: $(TARGET) + +# Link the executable +$(TARGET): $(OBJS) + $(CC) $(CFLAGS) -o $@ $^ + +# Compile source files +%.o: %.c + $(CC) $(CFLAGS) -c $< -o $@ + +# Run the tests +run: $(TARGET) + ./$(TARGET) + +# Clean generated files +clean: + rm -f $(OBJS) $(TARGET) + +# Rebuild the project +rebuild: clean all + +.PHONY: all run clean rebuild + + +Made changes. + +Now that I've created the NEON implementation and updated the test code and Makefile, let's compile and run the code to see the performance comparison. + +``` + +GitHub Copilot generated a NEON-optimized implementation of Adler32, along with updates to the test program and Makefile. These changes make it easy to compare the performance of both versions. + +Continue to the next section to compile and run the NEON version, and to compare its performance against the original implementation. diff --git a/content/learning-paths/cross-platform/adler32/neon-debug-9.md b/content/learning-paths/cross-platform/adler32/neon-debug-9.md new file mode 100644 index 0000000000..9c69ef7689 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/neon-debug-9.md @@ -0,0 +1,99 @@ +--- +title: Debug the NEON version to match the standard C version +weight: 9 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Debug the NEON Version Adler32 and Compare Performance? + +In the previous step, GitHub Copilot revealed that the NEON implementation did not produce the same checksum results as the standard C version. + +Use the prompt below to instruct GitHub Copilot to try to debug and fix the issue. + +```console +Can you modify adler32-neon.c to produce the correct checksum results for the adler32 algorithm? 
The test program is adler32-test.c and the simple version is adler32-simple.c +The NEON version should produce the same checksum as adler32-simple.c but run faster. +``` + +If the LLM you’re using cannot resolve the NEON functional issues, consider trying another model, such as Gemini 2.5 Pro. + +The output is: + +```output +GitHub Copilot: Okay, I can help with that. The issue seems to be in how the `a` and `b` accumulators are updated within the NEON processing loop. The update to `b` needs to correctly incorporate the value of `a` *before* it was updated in the current block, along with the weighted sum of the data bytes. + +I will modify the `adler32_neon` function in adler32-neon.c to correctly calculate the sums `s1` (sum of bytes) and `s2` (weighted sum of bytes) for each block using NEON intrinsics, and then update the scalar `a` and `b` values according to the Adler-32 definition: `a = (a_old + s1) % MOD_ADLER` and `b = (b_old + BLOCK_SIZE * a_old + s2) % MOD_ADLER`. + +uint32_t adler32_neon(const unsigned char *data, size_t length, uint32_t initial) +{ + uint32_t a = initial & 0xffff; + uint32_t b = (initial >> 16) & 0xffff; + + /* For small inputs, use the standard implementation */ + if (length < BLOCK_SIZE) { + return adler32_standard(data, length, initial); + } + + /* Preload weights */ + static const uint16_t weights[16] = {16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}; + uint16x8_t vweights_low = vld1q_u16(weights); + uint16x8_t vweights_high = vld1q_u16(weights + 8); + + /* Process blocks of BLOCK_SIZE bytes */ + while (length >= BLOCK_SIZE) { + /* Store old 'a' before processing the block */ + uint32_t a_old = a; + + /* Load 16 bytes */ + uint8x16_t vdata = vld1q_u8(data); + + /* Widen bytes to 16 bits */ + uint16x8_t vdata_low = vmovl_u8(vget_low_u8(vdata)); + uint16x8_t vdata_high = vmovl_u8(vget_high_u8(vdata)); + + /* Calculate S1 = sum(data[i]) for the block */ + uint16x8_t vs1_sum = vaddq_u16(vdata_low, vdata_high); + /* Horizontal sum for S1 */ + uint32x4_t vs1_pairs = vpaddlq_u16(vs1_sum); + uint64x2_t vs1_quads = vpaddlq_u32(vs1_pairs); + uint32_t block_s1 = (uint32_t)(vgetq_lane_u64(vs1_quads, 0) + vgetq_lane_u64(vs1_quads, 1)); + + /* Calculate S2 = sum((BLOCK_SIZE-i) * data[i]) for the block */ + uint16x8_t weighted_low = vmulq_u16(vdata_low, vweights_low); + uint16x8_t weighted_high = vmulq_u16(vdata_high, vweights_high); + uint16x8_t vs2_sum = vaddq_u16(weighted_low, weighted_high); + /* Horizontal sum for S2 */ + uint32x4_t vs2_pairs = vpaddlq_u16(vs2_sum); + uint64x2_t vs2_quads = vpaddlq_u32(vs2_pairs); + uint32_t block_s2 = (uint32_t)(vgetq_lane_u64(vs2_quads, 0) + vgetq_lane_u64(vs2_quads, 1)); + + /* Update scalar a and b */ + a = (a_old + block_s1) % MOD_ADLER; + b = (b + (BLOCK_SIZE * a_old) + block_s2) % MOD_ADLER; + + /* Move to next block */ + data += BLOCK_SIZE; + length -= BLOCK_SIZE; + } + + /* Process remaining bytes using standard implementation */ + if (length > 0) { +// ...existing code... + + +Made changes. + +I've modified the `adler32_neon` function in adler32-neon.c to correctly calculate the checksum using NEON intrinsics. + +Now, I'll check for any compilation errors and then rebuild and run the test program to verify the results.I have modified the `adler32_neon` function in adler32-neon.c to correctly calculate the block sums and update the `a` and `b` accumulators according to the Adler-32 algorithm definition, using NEON intrinsics for the block processing. 
+ +I then checked for errors in the modified file and rebuilt the project using `make`. The build completed successfully, although there's a minor compiler warning in adler32-test.c about comparing signed and unsigned integers, which is unrelated to the changes made. + +You can now run the test program adler32-test to verify that the NEON implementation produces the same results as the simple implementation. +``` + +GitHub Copilot successfully modified the NEON version to produce correct checksum results matching the standard C implementation. + +Continue to the next section to summarize the project, highlight the performance improvements, and generate a full project description. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/neon-run-8.md b/content/learning-paths/cross-platform/adler32/neon-run-8.md new file mode 100644 index 0000000000..ff2a761cc7 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/neon-run-8.md @@ -0,0 +1,128 @@ +--- +title: Compare the NEON version to the standard C version of Adler32 +weight: 8 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Run the NEON Version of Adler32 and Compare Performance? + +Before comparing performance, you need to ensure that the NEON implementation builds, runs, and produces the same results as the standard C version. + +To build the NEON version and compare it to the C version use the prompt below in your GitHub Copilot Agent session: + +```console +Use the Makefile to compile and run the new test program and compare the checksum values and performance of each implementation. +``` + +The output is: + +```output + +GitHub Copilot: I'll help you compile and run the test program to compare the checksum implementations. Let's first examine the Makefile to understand how to build and run the tests.I've examined the Makefile and now I understand how to compile and run the test program. The Makefile contains targets for building the adler32 implementations and running the tests. + +Let's also look at the test program to understand what it's testing:I've examined the Makefile and the test program (adler32-test.c). Now I understand how we can compile and run the tests to compare the checksum implementations. The adler32-test.c file contains comprehensive tests that will: + +1. Verify both implementations with known test vectors +2. Test the incremental API for both implementations +3. Run performance benchmarks with different data sizes (1KB to 10MB) + +Let me compile and run the test program using the Makefile:I see that the `make clean && make` command completed, but it didn't show detailed output. Let's run the test program to see the comparison results:I've run the Adler-32 test program for you. The test results show that the current implementation has a few issues that need to be addressed. + +Here's what I found from running the tests: + +1. **Verification Tests**: The program only tested one implementation against known test vectors (all 6 tests passed). However, it should be testing both the simple and NEON implementations separately, as designed in the test code. + +2. **Incremental API Test**: Only one implementation was tested in the incremental mode, but the test program is designed to test both. + +3. **Performance Tests**: The benchmarks only showed results for a single implementation rather than comparing both the simple and NEON versions as intended. 
+ +The issue appears to be that the test program is not fully comparing both implementations as it was designed to do. This suggests there might be a mismatch between what's implemented and what's being tested. + +Let's check the simple implementation to understand what functions are available:I've compiled and run the Adler-32 test program to compare the implementations. Let me share what I found. + +The test program ran, but it's not properly comparing both implementations as it was designed to do. Here's what I discovered: + +1. The test passed all 6 verification tests, but it only appears to be testing one implementation against the known test vectors, not both the simple and NEON implementations as intended. + +2. Only one implementation was tested in the incremental API tests, while the code is written to test both. + +3. The performance benchmarks only showed results for a single implementation rather than comparing both as intended. For reference, the single implementation achieved: + - 1KB: 195.31 MB/s + - 10KB: 279.02 MB/s + - 100KB: 298.64 MB/s + - 1MB: 298.24 MB/s + - 10MB: 298.03 MB/s + +Looking at the test code and the adler32-simple.h header file, I can see the issue is likely related to the test program not properly executing the comparison between both implementations. + +Let me fix this by updating the test file so it properly tests both implementations. Then we can run it again to get the comparative results.I've reviewed the test program (adler32-test.c) and found that it's correctly designed to compare both the simple and NEON implementations. + +There seems to be an issue with how the tests are being run or how the implementations are connected. The test code expects to compare both implementations, but when it ran, it only showed results for a single implementation. + +Let's run the tests again, but with verbose output to better understand what's happening:I've run the tests to compare the Adler-32 implementations, and now I can provide you with the detailed results. + +## Test Results + +### Verification Tests +- Both implementations were tested against known test vectors. +- The Simple implementation passed all 6 test cases (100%). +- The NEON implementation passed 4 out of 6 test cases (67%). +- There were mismatches in test case #5 (alphabet string) and test case #6 (alphanumeric string). + +### Incremental API Test +- Both implementations showed consistency between direct and incremental calculation methods. +- However, the checksums between the Simple and NEON implementations don't match. + +### Performance Tests +For each test size, here are the results: + +1. **1 KB Data**: + - Simple: 195.31 MB/s + - NEON: 976.56 MB/s + - Speedup: 5.00x + - Checksums don't match + +2. **10 KB Data**: + - Simple: 287.22 MB/s + - NEON: 1085.07 MB/s + - Speedup: 3.78x + - Checksums don't match + +3. **100 KB Data**: + - Simple: 295.93 MB/s + - NEON: 1302.08 MB/s + - Speedup: 4.40x + - Checksums don't match + +4. **1 MB Data**: + - Simple: 297.97 MB/s + - NEON: 1305.48 MB/s + - Speedup: 4.38x + - Checksums don't match + +5. **10 MB Data**: + - Simple: 297.95 MB/s + - NEON: 1310.44 MB/s + - Speedup: 4.40x + - Checksums don't match + +## Analysis + +The NEON implementation shows impressive performance gains, with speedups ranging from 3.78x to 5.00x compared to the simple implementation. This demonstrates the effectiveness of using SIMD (Single Instruction, Multiple Data) instructions for checksum calculations. 
+ +However, there's a critical issue: **the checksum values from the NEON implementation don't match those from the simple implementation**. Since the simple implementation passes all verification tests, this indicates that the NEON implementation has a bug that needs to be fixed. + +The most likely issue is in the NEON vector processing logic. From the test results, it appears the NEON implementation is correctly handling shorter inputs (the first 4 test cases passed) but fails on longer inputs. This suggests the bug might be related to how it processes larger blocks of data. + +## Conclusion + +The NEON implementation offers significant performance improvements (up to 5x faster), which would be valuable for applications processing large amounts of data. However, it produces incorrect checksum values currently, making it unsuitable for production use until fixed. + +If you'd like, I can analyze the code to identify and fix the issue in the NEON implementation so you can maintain both correctness and the performance benefits. +``` + +GitHub Copilot determined that while the NEON version delivers better performance, it currently produces incorrect checksums. This must be addressed before the NEON implementation can be considered correct or production-ready. + +In the next section, you'll fix the functional issues in the NEON implementation and re-run the performance comparison. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/simple-c-3.md b/content/learning-paths/cross-platform/adler32/simple-c-3.md new file mode 100644 index 0000000000..76a17750b5 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/simple-c-3.md @@ -0,0 +1,196 @@ +--- +title: Create a C Version of Adler32 +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Generate a C Function for Adler32? + +You can use GitHub Copilot to generate a simple C version of the Adler32 checksum algorithm. + +On your Arm Linux computer, create a new directory and enter the empty directory: + +```console +mkdir adler32 && cd adler32 +``` + +Open VS Code in the new directory to start the project: + +```console +code . & +``` + +Open GitHub Copilot, select the Large Language Model (LLM) you want to use, and select `Agent` mode. + +{{% notice Note %}} +Your results will be different based on the model you use. The model used for the output shown is Claude 3.7 Sonnet. +{{% /notice %}} + +Copy and paste the information below as a GitHub Copilot prompt and click the `Send` button. + +```console +Write a basic C implementation of the adler32 checksum function. +Write the function in a file named adler32-simple.c that can be called from another C file. +Don't create a C main function yet. +``` + +The output is: + +```output + +GitHub Copilot: I'll create a basic C implementation of the Adler-32 checksum function in a file named adler32-simple.c. Let me do that for you. + +First, let me check if the file exists already:The file doesn't exist yet, so I'll create it. Let me write the Adler-32 checksum implementation for you. + +The Adler-32 checksum algorithm works by calculating two 16-bit checksums A and B and combining them into a single 32-bit checksum. It's a simple algorithm that provides decent error detection with minimal computational overhead. + +/* + * adler32-simple.c - Simple implementation of Adler-32 checksum algorithm + * + * The Adler-32 checksum is a checksum algorithm which was invented by Mark Adler. 
+ * It is used in the zlib compression library and specified in RFC 1950. + */ + +#include <stdint.h> +#include <stddef.h> + +/* Adler-32 parameters */ +#define MOD_ADLER 65521 /* Largest prime number less than 2^16 */ + +/* + * adler32 - Calculate the Adler-32 checksum of a data buffer + * @data: Data buffer + * @length: Length of data buffer in bytes + * @initial: Initial Adler-32 value (typically 1) + * + * Returns the calculated Adler-32 checksum + */ +uint32_t adler32(const unsigned char *data, size_t length, uint32_t initial) +{ + uint32_t a = initial & 0xffff; + uint32_t b = (initial >> 16) & 0xffff; + size_t i; + + /* Process each byte */ + for (i = 0; i < length; i++) { + a = (a + data[i]) % MOD_ADLER; + b = (b + a) % MOD_ADLER; + } + + /* Combine the two 16-bit values */ + return (b << 16) | a; +} + +/* + * adler32_init - Initialize an Adler-32 checksum + * + * Returns the initial Adler-32 value + */ +uint32_t adler32_init(void) +{ + return 1; +} + +/* + * adler32_update - Update Adler-32 checksum with new data + * @adler: Current Adler-32 value + * @data: Data buffer + * @length: Length of data buffer in bytes + * + * Returns the updated Adler-32 value + */ +uint32_t adler32_update(uint32_t adler, const unsigned char *data, size_t length) +{ + return adler32(data, length, adler); +} + +/* + * adler32_final - Finalize Adler-32 calculation + * @adler: Current Adler-32 value + * + * Returns the final Adler-32 checksum + */ +uint32_t adler32_final(uint32_t adler) +{ + return adler; +} + + +Made changes. + +Now let's create a header file to make the Adler-32 functions accessible from other C files: + +/* + * adler32-simple.h - Header file for Adler-32 checksum implementation + */ + +#ifndef ADLER32_SIMPLE_H +#define ADLER32_SIMPLE_H + +#include <stdint.h> +#include <stddef.h> + +/** + * Calculate Adler-32 checksum for a data buffer + * + * @param data Pointer to the data buffer + * @param length Length of the data in bytes + * @param initial Initial Adler-32 value (typically 1) + * + * @return The calculated Adler-32 checksum + */ +uint32_t adler32(const unsigned char *data, size_t length, uint32_t initial); + +/** + * Initialize Adler-32 checksum calculation + * + * @return The initial Adler-32 value (1) + */ +uint32_t adler32_init(void); + +/** + * Update Adler-32 checksum with new data + * + * @param adler Current Adler-32 value + * @param data Pointer to the data buffer + * @param length Length of the data in bytes + * + * @return The updated Adler-32 value + */ +uint32_t adler32_update(uint32_t adler, const unsigned char *data, size_t length); + +/** + * Finalize Adler-32 calculation + * + * @param adler Current Adler-32 value + * + * @return The final Adler-32 checksum + */ +uint32_t adler32_final(uint32_t adler); + +#endif /* ADLER32_SIMPLE_H */ + + +Made changes. + +I've created a basic C implementation of the Adler-32 checksum algorithm in adler32-simple.c as requested. I also added a corresponding header file adler32-simple.h to make the functions accessible from other C files. + +The implementation includes: + +1. `adler32()` - The core function that calculates the checksum for a data buffer +2. `adler32_init()` - Function to initialize an Adler-32 checksum (returns 1) +3. `adler32_update()` - Function to update a checksum with new data +4. `adler32_final()` - Function to finalize the checksum calculation + +These functions follow the standard pattern for checksum/hash functions, allowing you to calculate checksums for data in chunks rather than all at once if needed.
+ +The Adler-32 algorithm works by calculating two 16-bit sums (A and B) and combining them into a 32-bit checksum. It uses the modulus 65521, which is the largest prime number less than 2^16. + +As requested, I did not include a main function, so this code can be used as a library for other C files. +``` + +You now have the files `adler32-simple.c` and `adler32-simple.h` in your directory. These serve as the reference implementation of the Adler32 algorithm with baseline performance. + +Continue to the next section to create the test application. \ No newline at end of file diff --git a/content/learning-paths/cross-platform/adler32/summary-10.md b/content/learning-paths/cross-platform/adler32/summary-10.md new file mode 100644 index 0000000000..0c87aef025 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/summary-10.md @@ -0,0 +1,109 @@ +--- +title: Summarize the project with a README.md file +weight: 10 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Can I Summarize the Project Results? + +You can use GitHub Copilot to generate a project summary in a README file. + +Use the prompt below to collaborate with GitHub Copilot Agent to generate your README. + +Review and refine the results to align them with your project's goals. + +```console +Review the files in my project. +Create a README.md file that describes the background and overview of the project. +Explain the two implementations of the adler32 checksum. +Add a note that the performance results recorded on the Neoverse N1 processor. +Use a table to compare the original version and the NEON version and show the performance improvement factor. +``` + +Below is the created README.md file. The formatting doesn't match the Learning Path template exactly, but you can copy the README file to a new repository in GitHub for improved results. + +## Adler-32 Checksum Implementation Comparison + +### Background + +The Adler-32 checksum is an algorithm invented by Mark Adler, used in the zlib compression library and specified in RFC 1950. It provides a fast way to calculate a checksum for data integrity verification, balancing speed and reliability. It generates a 32-bit integer value based on the input data. + +### Overview + +This project provides and compares two implementations of the Adler-32 checksum algorithm: + +1. A simple, standard C implementation. +2. An optimized implementation using ARM NEON SIMD instructions. + +The goal is to demonstrate the performance benefits of using NEON intrinsics for this type of computation on compatible ARM architectures, such as the Neoverse N1. + +The project includes: +* Source code for both implementations (`adler32-simple.c`, `adler32-neon.c`). +* Header files (`adler32-simple.h`, `adler32-neon.h`). +* A test and benchmark program (`adler32-test.c`) to verify correctness and measure performance. +* A `Makefile` for easy compilation and testing. + +### Implementations + +#### 1. Simple Implementation (`adler32-simple.c`) + +This is a straightforward C implementation following the standard Adler-32 algorithm definition. It processes the input data byte by byte, updating two 16-bit accumulators (`a` and `b`) modulo 65521 (the largest prime smaller than 2^16). + +#### 2. NEON-Optimized Implementation (`adler32-neon.c`) + +This implementation leverages ARM NEON SIMD (Single Instruction, Multiple Data) instructions to accelerate the checksum calculation. Key aspects include: +* Processing data in blocks (16 bytes at a time). 
+* Using NEON intrinsics (`vld1q_u8`, `vmovl_u8`, `vaddq_u16`, `vpaddlq_u16`, `vmulq_u16`, etc.) to perform parallel operations on data vectors. +* Calculating the sums `S1` (sum of bytes) and `S2` (weighted sum) for each block using vector operations. +* Updating the scalar `a` and `b` accumulators based on the block results. +* Falling back to the standard implementation for data lengths smaller than the block size or for the remaining bytes after processing full blocks. + +### Performance + +The performance of both implementations was measured on an **AWS Graviton2 processor (based on Arm Neoverse N1 cores)**. The benchmark program (`adler32-test`) calculates the checksum for various data sizes and measures the time taken and throughput (in MB/s). + +The following table summarizes the throughput results and the performance improvement factor of the NEON version compared to the simple version: + +| Data Size | Simple Throughput (MB/s) | NEON Throughput (MB/s) | Speedup Factor | +| :-------- | :----------------------- | :--------------------- | :------------- | +| 1 KB | 244.14 | 976.56 | 4.00x | +| 10 KB | 295.93 | 3255.21 | 11.00x | +| 100 KB | 298.64 | 3150.20 | 10.55x | +| 1 MB | 298.33 | 3215.43 | 10.78x | +| 10 MB | 298.37 | 3194.89 | 10.71x | + +**Note:** Performance results can vary based on the specific hardware, compiler, and system load. The results above demonstrate a significant performance improvement (around **10-11x** for larger data sizes) when using NEON optimization on the Neoverse N1 architecture. + +### Building and Running + +Use the provided `Makefile`: + +```bash +# Compile the code +make + +# Run verification and performance tests +make run + +# Clean up generated files +make clean +``` + +The table summarizes the speedup obtained by the NEON version. + +Using Agent mode in GitHub Copilot is a significant benefit when you are actively building and running software. Agent mode can create files and modify them to make needed improvements. + +### Tips for Using GitHub Copilot Effectively + +This project was completed using GitHub Copilot Agent without modifying the generated files. While that might not be practical in every case, the demonstration shows how NEON intrinsics can significantly boost performance. + +GitHub Copilot is especially useful for: +* Generating vectorized versions of scalar code. +* Writing and adapting NEON intrinsics. +* Identifying and fixing bugs in complex low-level code, even for developers who aren’t SIMD experts. + +Make sure to try different LLMs with Copilot as the results will vary greatly depending on the model. + + diff --git a/content/learning-paths/cross-platform/adler32/test-prog-4.md b/content/learning-paths/cross-platform/adler32/test-prog-4.md new file mode 100644 index 0000000000..6bc0e260d6 --- /dev/null +++ b/content/learning-paths/cross-platform/adler32/test-prog-4.md @@ -0,0 +1,249 @@ +--- +title: Create a test program +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## How Do I Prompt Copilot to Generate a Test Program? + +You can now move on to create a test program using the prompt below. + +Copy and paste the prompt into GitHub Copilot to generate a test program: + +```console +Create a C file named adler32-test.c which includes a main() function and calls the adler32 function in adler32-simple.c +Add code around the adler32 calls to measure the performance of the adler32 checksum function. +Generate a simple test to make sure the adler32 generates the expected checksum. 
+Add randomly generated test data of various sizes: 1 kb, 10 kb, 100 kb, 1 mb, and 10 mb. +Add code to run all test data with different sizes and measure performance. +Don't create the Makefile yet. +``` + +The output is shown below: + +```output + +GitHub Copilot: I'll create a test file for the Adler-32 implementation that includes performance testing with different data sizes. I'll first check the current implementation to understand how to use the functions properly.Now I understand how the Adler-32 implementation works. Let me create a test file that measures the performance of the adler32 function with different data sizes and verifies that it produces the expected checksums. + +/* + * adler32-test.c - Test and benchmark for Adler-32 checksum implementation + */ + +#include +#include +#include +#include +#include +#include "adler32-simple.h" + +/* Known test vectors for Adler-32 */ +struct test_vector { + const char *data; + uint32_t expected; +}; + +/* Test data sizes in bytes */ +#define KB (1024) +#define MB (1024 * 1024) + +static const struct test_vector test_vectors[] = { + { "", 0x00000001 }, + { "a", 0x00620062 }, + { "abc", 0x024d0127 }, + { "message digest", 0x29750586 }, + { "abcdefghijklmnopqrstuvwxyz", 0x90860b20 }, + { "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 0x8adb150c }, +}; + +/* Structure to hold test data */ +struct test_data { + unsigned char *buffer; + size_t size; + const char *name; +}; + +/* Generate random test data of specified size */ +static unsigned char* generate_test_data(size_t size) { + unsigned char *data = malloc(size); + if (!data) { + fprintf(stderr, "Failed to allocate memory for test data\n"); + exit(EXIT_FAILURE); + } + + for (size_t i = 0; i < size; i++) { + data[i] = rand() & 0xFF; + } + + return data; +} + +/* Run performance test on a data buffer */ +static void test_performance(const unsigned char *data, size_t size, const char *test_name) { + clock_t start, end; + double cpu_time_used; + uint32_t checksum; + + printf("Testing %s (%zu bytes):\n", test_name, size); + + /* Warm-up run */ + adler32(data, size, adler32_init()); + + /* Timed run */ + start = clock(); + checksum = adler32(data, size, adler32_init()); + end = clock(); + + cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; + double throughput = (double)size / (cpu_time_used * MB); + + printf(" Checksum: 0x%08x\n", checksum); + printf(" Time: %.6f seconds\n", cpu_time_used); + printf(" Throughput: %.2f MB/s\n\n", throughput); +} + +/* Run verification tests with known test vectors */ +static void run_verification_tests(void) { + printf("Running verification tests:\n"); + printf("==========================\n"); + + int passed = 0; + for (size_t i = 0; i < sizeof(test_vectors) / sizeof(test_vectors[0]); i++) { + const struct test_vector *tv = &test_vectors[i]; + uint32_t result = adler32((const unsigned char *)tv->data, strlen(tv->data), adler32_init()); + + printf("Test %zu: \"%s\"\n", i + 1, tv->data); + printf(" Expected: 0x%08x\n", tv->expected); + printf(" Result: 0x%08x\n", result); + + if (result == tv->expected) { + printf(" Status: PASS\n"); + passed++; + } else { + printf(" Status: FAIL\n"); + } + printf("\n"); + } + + printf("Verification summary: %d/%zu tests passed\n\n", + passed, sizeof(test_vectors) / sizeof(test_vectors[0])); +} + +/* Run performance tests with different data sizes */ +static void run_performance_tests(void) { + printf("Running performance tests:\n"); + printf("=========================\n"); + + /* Define test data 
sizes */ + struct test_data tests[] = { + { NULL, 1 * KB, "1 KB" }, + { NULL, 10 * KB, "10 KB" }, + { NULL, 100 * KB, "100 KB" }, + { NULL, 1 * MB, "1 MB" }, + { NULL, 10 * MB, "10 MB" } + }; + + /* Generate random test data for each size */ + for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + tests[i].buffer = generate_test_data(tests[i].size); + test_performance(tests[i].buffer, tests[i].size, tests[i].name); + } + + /* Free allocated memory */ + for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + free(tests[i].buffer); + } +} + +/* Test the incremental API */ +static void test_incremental_api(void) { + printf("Testing incremental API:\n"); + printf("=======================\n"); + + /* Generate test data */ + size_t size = 1 * MB; + unsigned char *data = generate_test_data(size); + + /* Calculate checksum in one go */ + uint32_t direct = adler32(data, size, adler32_init()); + + /* Calculate checksum incrementally */ + uint32_t incremental = adler32_init(); + + /* Process in chunks of 64KB */ + size_t chunk_size = 64 * KB; + for (size_t offset = 0; offset < size; offset += chunk_size) { + size_t remaining = size - offset; + size_t current_chunk = (remaining < chunk_size) ? remaining : chunk_size; + incremental = adler32_update(incremental, data + offset, current_chunk); + } + incremental = adler32_final(incremental); + + /* Compare results */ + printf(" Direct calculation: 0x%08x\n", direct); + printf(" Incremental calculation: 0x%08x\n", incremental); + printf(" Match: %s\n\n", (direct == incremental) ? "YES" : "NO"); + + free(data); +} + +int main(void) { + /* Seed random number generator */ + srand((unsigned int)time(NULL)); + + printf("Adler-32 Test and Benchmark\n"); + printf("===========================\n\n"); + + /* Run verification tests with known test vectors */ + run_verification_tests(); + + /* Test the incremental API */ + test_incremental_api(); + + /* Run performance tests with different data sizes */ + run_performance_tests(); + + return 0; +} + + +Made changes. + +I've created the adler32-test.c file with a comprehensive test suite for the Adler-32 checksum implementation. This test file includes: + +1. **Verification tests** with known test vectors to ensure the implementation produces the expected output for standard inputs. + +2. **Incremental API test** to verify that performing the checksum calculation incrementally (in chunks) produces the same result as calculating it in one pass. + +3. **Performance tests** with randomly generated data of various sizes: + - 1 KB + - 10 KB + - 100 KB + - 1 MB + - 10 MB + +The test program performs the following operations: +- Generates random test data for each size +- Calculates the checksum for each data buffer +- Measures the execution time +- Calculates and displays the throughput in MB/s + +The test output will show: +- Results from verification tests against known checksums +- Comparison between direct and incremental calculation +- Performance metrics for each data size + +To compile and run this test, you'll need to use a command like: + +gcc -Wall -O2 adler32-simple.c adler32-test.c -o adler32-test +./adler32-test + +If you'd like me to create the Makefile for easier compilation, just let me know. + +``` + +You now have a test program which confirms the Adler32 algorithm is working correctly for various input data and can measure the performance of the previously-generated C function. + +Now continue to the next section to run the test program and view the results. 
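
As a quick sanity check on the expected checksum values used in the test vectors, it helps to trace the algorithm by hand for a short input. The sketch below is a small standalone program, written for this explanation rather than generated by Copilot, that walks through `"abc"`: starting from a = 1 and b = 0, the running values are a = 98, 196, 295 and b = 98, 294, 589, so the result is (589 << 16) | 295 = 0x024d0127, which matches the expected value in `adler32-test.c`.

```c
/* verify-abc.c - illustrative hand-check of the Adler-32 value for "abc" */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MOD_ADLER 65521 /* largest prime less than 2^16 */

int main(void) {
    const unsigned char msg[] = "abc";
    uint32_t a = 1, b = 0;

    for (size_t i = 0; i < strlen((const char *)msg); i++) {
        a = (a + msg[i]) % MOD_ADLER; /* running sum of bytes */
        b = (b + a) % MOD_ADLER;      /* running sum of the a values */
        printf("after byte %zu: a=%u b=%u\n", i, a, b);
    }

    printf("adler32(\"abc\") = 0x%08x\n", (b << 16) | a); /* expect 0x024d0127 */
    return 0;
}
```

You can compile and run it on its own, for example with `gcc verify-abc.c -o verify-abc && ./verify-abc`, if you want to confirm the arithmetic before moving on.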
+ diff --git a/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md b/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md index fdabfb3bd0..4ef03c2c09 100644 --- a/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md +++ b/content/learning-paths/cross-platform/dynamic-memory-allocator/_index.md @@ -1,34 +1,26 @@ --- -armips: null -author: David Spickett +title: Write a Dynamic Memory Allocator + +minutes_to_complete: 120 + +who_is_this_for: This is an introductory topic for software developers learning about dynamic memory allocation for the first time, and who may have used malloc and free in C programming. It also provides a starting point to explore more advanced memory allocation topics. + layout: learningpathall learning_objectives: - Explain how dynamic memory allocation and the C heap works - Write a simple dynamic memory allocator - Explain some of the risks of heap allocation in general -learning_path_main_page: 'yes' -minutes_to_complete: 120 -operatingsystems: -- Linux + prerequisites: - Familiarity with C programming, with a good understanding of pointers. - A Linux machine to run the example code. -skilllevels: Introductory -subjects: Performance and Architecture -armips: - - Cortex-A - - Neoverse + +author: David Spickett + test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed -title: Write a Dynamic Memory Allocator -tools_software_languages: -- C -- Coding -- Runbook further_reading: - resource: @@ -40,12 +32,29 @@ further_reading: link: https://llsoftsec.github.io/llsoftsecbook/#memory-vulnerability-based-attacks type: website +### Tags +skilllevels: Introductory +subjects: Performance and Architecture +armips: + - Cortex-A + - Neoverse +operatingsystems: + - Linux +tools_software_languages: + - C + - Coding + - Runbook -weight: 1 -who_is_this_for: This is an introductory topic for software developers learning about dynamic memory allocation for the first time, - and who may have used malloc and free in C programming. It also provides a starting point to explore more advanced memory allocation topics. +### Cross-platform metadata only shared_path: true shared_between: - laptops-and-desktops - embedded-and-microcontrollers ---- + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- \ No newline at end of file diff --git a/content/learning-paths/cross-platform/intrinsics/_index.md b/content/learning-paths/cross-platform/intrinsics/_index.md index ecf582fcb3..832cbfbc44 100644 --- a/content/learning-paths/cross-platform/intrinsics/_index.md +++ b/content/learning-paths/cross-platform/intrinsics/_index.md @@ -1,33 +1,39 @@ --- -armips: -- Neoverse -- Cortex-A -author: Jason Andrews +title: Porting architecture specific intrinsics + description: Learn how to port architecture-specific intrinsics to Arm processors. -layout: learningpathall + +minutes_to_complete: 30 + +who_is_this_for: This is an advanced topic for software developers interested in porting + architecture specific intrinsics to Arm processors. + learning_objectives: - Describe what intrinsics are and how to find them in code. 
- Evaluate options and use header-only libraries to port architecture-specific intrinsics to Arm. -learning_path_main_page: 'yes' -minutes_to_complete: 30 -operatingsystems: -- Linux + prerequisites: - Some understanding of SIMD concepts. - An Arm based machine or [cloud instance](/learning-paths/servers-and-cloud-computing/csp/) running Ubuntu Linux. - Optionally, an `x86_64` machine also running Ubuntu. -skilllevels: Advanced -subjects: Performance and Architecture + +author: Jason Andrews + test_images: - amd64/ubuntu:latest - arm64v8/ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed -- passed -title: Porting architecture specific intrinsics + +### Tags +skilllevels: Advanced +subjects: Performance and Architecture +armips: +- Neoverse +- Cortex-A +operatingsystems: +- Linux tools_software_languages: - Neon - SVE @@ -53,15 +59,15 @@ further_reading: link: https://youtu.be/Ya9Co04fszI type: video - - -weight: 1 -who_is_this_for: This is an advanced topic for software developers interested in porting - architecture specific intrinsics to Arm processors. - ### Cross-platform metadata only shared_path: true shared_between: - servers-and-cloud-computing - laptops-and-desktops + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. --- diff --git a/content/learning-paths/cross-platform/intrinsics/intro.md b/content/learning-paths/cross-platform/intrinsics/intro.md index b281b64da2..68c715617e 100644 --- a/content/learning-paths/cross-platform/intrinsics/intro.md +++ b/content/learning-paths/cross-platform/intrinsics/intro.md @@ -8,11 +8,11 @@ weight: 2 Migrating C/C++ applications from x64 to Arm requires recompiling the source code for the Arm architecture. A simple recompile works much of the time, but not always. -SIMD extensions are one of the common barriers encountered when porting C/C++ applications from x64 to Arm. This article is a short background on intrinsics and how to identify them in code. This Learning Path presents options for how to get the code compiled and running on an Arm-based platform. +SIMD extensions are one of the common barriers encountered when porting C/C++ applications from x64 to Arm. This article is a short background on intrinsics and how to identify them in code. This Learning Path presents options for how to get the code compiled and running on an Arm-based platform. ## Intrinsics -Intrinsics are functions which are built into the compiler and not part of a library. They look like function calls, but don’t require an actual function call. When the compiler encounters intrinsics it directly substitutes a sequence of instructions. Intrinsics are often used to access special instructions that don’t have a direct mapping from C/C++ or when performance optimization is needed. +Intrinsics are functions which are built into the compiler and not part of a library. They look like function calls, but don’t require an actual function call. When the compiler encounters intrinsics it directly substitutes a sequence of instructions. 
Intrinsics are often used to access special instructions that don’t have a direct mapping from C/C++ or when performance optimization is needed. One use of intrinsics is to access SIMD (single-instruction, multiple-data) instructions directly from C/C++ for improved application performance. Intrinsics are easier to work with compared to assembly language, but they often pose a challenge when porting source code to a new architecture. @@ -21,7 +21,7 @@ Intel Streaming SIMD Extensions (SSE) and [Arm NEON](https://developer.arm.com/d There are generally 3 ways to program SIMD hardware: - The C/C++ compiler recognizes opportunities to use SIMD instructions and inserts them automatically (with or without some guidance) - Intrinsics to access SIMD instructions directly from C/C++ source code -- Assembly programming +- Assembly programming ## Source code example diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/5-SME2-matmul-asm.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/5-sme2-matmul-asm.md similarity index 100% rename from content/learning-paths/cross-platform/multiplying-matrices-with-sme2/5-SME2-matmul-asm.md rename to content/learning-paths/cross-platform/multiplying-matrices-with-sme2/5-sme2-matmul-asm.md diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/6-SME2-matmul-intr.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/6-sme2-matmul-intr.md similarity index 100% rename from content/learning-paths/cross-platform/multiplying-matrices-with-sme2/6-SME2-matmul-intr.md rename to content/learning-paths/cross-platform/multiplying-matrices-with-sme2/6-sme2-matmul-intr.md diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/app.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/app.md index 5848fe0386..275d199bee 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/app.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/app.md @@ -30,8 +30,8 @@ To run the app in Android Studio using an emulator, follow these steps: Once the application starts, click the **Load Image** button. It loads a randomly-selected image. Then, click **Run Inference** to recognize the digit. The application displays the predicted label and the inference time as shown below: -![img alt-text#center](Figures/05.png "Figure 7. Digit Recognition 1") +![img alt-text#center](figures/05.png "Figure 7. Digit Recognition 1") -![img alt-text#center](Figures/06.png "Figure 8. Digit Recognition 2") +![img alt-text#center](figures/06.png "Figure 8. Digit Recognition 2") In the next step of this Learning Path, you will learn how to further optimize the model. diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/datasets-and-training.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/datasets-and-training.md index 1c66e8fedf..5f052a280a 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/datasets-and-training.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/datasets-and-training.md @@ -72,7 +72,7 @@ Make sure to replace 'x' with the version number of Python that you have install After running the code, you will see output similar to Figure 5: -![image alt-text#center](Figures/01.png "Figure 5. 
Output") +![image alt-text#center](figures/01.png "Figure 5. Output") ## Train the Model @@ -134,7 +134,7 @@ for t in range(epochs): After running the code, you see the following output showing the training progress, as displayed in Figure 2. -![image alt-text#center](Figures/02.png "Figure 2. Output 2") +![image alt-text#center](figures/02.png "Figure 2. Output 2") Once the training is complete, you see output similar to: diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/01.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/01.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/01.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/01.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/02.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/02.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/02.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/02.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/03.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/03.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/03.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/03.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/05.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/05.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/05.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/05.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/06.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/06.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/06.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/06.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/07.jpg b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/07.jpg similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/07.jpg rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/07.jpg diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/08.jpg b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/08.jpg similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/08.jpg rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/08.jpg diff --git 
a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/1.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/1.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/1.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/1.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/2.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/2.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/2.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/2.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/3.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/3.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/3.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/3.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/4.png b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/4.png similarity index 100% rename from content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/Figures/4.png rename to content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/figures/4.png diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/inference.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/inference.md index 343f3a822c..e5a0402c0d 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/inference.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/inference.md @@ -108,7 +108,7 @@ This code demonstrates how to use a saved PyTorch model for inference and visual After running the code, you should see results similar to the following figure: -![image](Figures/03.png "Figure 6. Results Displayed") +![image](figures/03.png "Figure 6. Results Displayed") ### What have you learned? diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/intro.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/intro.md index 683b9eb999..12b2a01994 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/intro.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/intro.md @@ -117,7 +117,7 @@ To ensure everything is set up correctly, follow these next steps: 4. Select the Python kernel you created earlier, `pytorch-env`. To do so, click **Kernels** in the top right-hand corner. Then, click **Jupyter Kernel...**, and you will see the Python kernel as shown below: -![img1 alt-text#center](Figures/1.png "Figure 1: Python kernel.") +![img1 alt-text#center](figures/1.png "Figure 1: Python kernel.") 5. 
In your Jupyter notebook, run the following code to verify PyTorch is working correctly: @@ -127,6 +127,6 @@ print(torch.__version__) ``` It will look as follows: -![img2 alt-text#center](Figures/2.png "Figure 2: Jupyter Notebook.") +![img2 alt-text#center](figures/2.png "Figure 2: Jupyter Notebook.") Now you have set up your development environment, you can move on to creating a PyTorch model. diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/mobile-app.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/mobile-app.md index cfbc922d4d..630f04422f 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/mobile-app.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/mobile-app.md @@ -228,9 +228,9 @@ This optimization showcases the benefits of quantization and layer fusion for mo This would allow the model to take full advantage of the device's computational capabilities, potentially further reducing the inference time. -![fig alt-text#center](Figures/07.jpg "Figure 9.") +![fig alt-text#center](figures/07.jpg "Figure 9.") -![fig alt-text#center](Figures/08.jpg "Figure 10.") +![fig alt-text#center](figures/08.jpg "Figure 10.") ### What have you learned? diff --git a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/model.md b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/model.md index e189a5c708..2e2e1ae80b 100644 --- a/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/model.md +++ b/content/learning-paths/cross-platform/pytorch-digit-classification-arch-training/model.md @@ -9,7 +9,7 @@ layout: "learningpathall" You can create and train a feedforward neural network to classify handwritten digits from the MNIST dataset. This dataset contains 70,000 images, comprising 60,000 training images and 10,000 testing images of handwritten numerals (0-9), each with dimensions of 28x28 pixels. Some representative MNIST digits with their corresponding labels are shown in Figure 3: -![img3 alt-text#center](Figures/3.png "Figure 3: MNIST Digits and Labels.") +![img3 alt-text#center](figures/3.png "Figure 3: MNIST Digits and Labels.") The neural network begins with an input layer containing 28x28 = 784 input nodes, with each node accepting a single pixel from a MNIST image. @@ -94,7 +94,7 @@ summary(model, (1, 28, 28)) After running the notebook, you will see the output as shown in Figure 4: -![img4 alt-text#center](Figures/4.png "Figure 4: Notebook Output.") +![img4 alt-text#center](figures/4.png "Figure 4: Notebook Output.") You will see a detailed summary of the NeuralNetwork model’s architecture, including the following information: diff --git a/content/learning-paths/cross-platform/woa_azure/azure_windows.md b/content/learning-paths/cross-platform/woa_azure/azure_windows.md index 58769e274d..bd016e37e0 100644 --- a/content/learning-paths/cross-platform/woa_azure/azure_windows.md +++ b/content/learning-paths/cross-platform/woa_azure/azure_windows.md @@ -23,7 +23,7 @@ To begin: 1. Login to your Azure account. 2. From the Azure search bar, begin typing the word "virtual machines" until the [Virtual Machines](https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.Compute%2FVirtualMachines) service appears as a selection. 
-![Search and Select VMs](Images/search_and_click_vm.png) +![Search and Select VMs](images/search_and_click_vm.png) 3. Select `Virtual Machines`. @@ -31,7 +31,7 @@ The Virtual Machines page appears. 4. Click `Create` > `Azure virtual machine`. -![Create an Azure VM](Images/click_create_vm.png) +![Create an Azure VM](images/click_create_vm.png) The `Create a virtual machine` page appears. @@ -42,14 +42,14 @@ The `Create a virtual machine` page appears. 9. Select `Standard` for `Security Type`. 10. Select `See all images` - You can find this under the `Image` dropdown. -![Config the VM Part 1](Images/config-vm-1.png) +![Config the VM Part 1](images/config-vm-1.png) The `Marketplace` page appears. 11. Enter `windows 11` into the search bar and press enter. 12. Click the `Image Type` filter and select `Arm64`. -![Select Arm images](Images/image_type.png) +![Select Arm images](images/image_type.png) You are presented with all available Arm VM images. @@ -59,7 +59,7 @@ Note all the different Arm images (Windows and others) Azure provides; feel free 13. From the drop-down at the bottom of the `Microsoft Windows 11 Preview arm64` tile, select `Windows 11 Professional 24H2 - Arm64`. -![img2](Images/select_24h2.png) +![img2](images/select_24h2.png) Upon selection, you are returned to the `Create a virtual machine` screen with your architecture set to `Arm64` and your image set to `Windows 11 Professional, version 24H2 - ARM64 Gen2`. @@ -79,21 +79,21 @@ The VM size suggested for this learning path is enough to demonstrate Windows on 19. Click `Review + create`. -![Review and Create](Images/review_create.png) +![Review and Create](images/review_create.png) The validation and confirmation page appears. 20. Upon review and validation of your settings, select `Create` to launch your Windows on Arm virtual machine. -![Create](Images/create.png) +![Create](images/create.png) At this point, Azure will deploy your new Windows on Arm instance to the Azure cloud. This may take a few moments to complete. -![Deploy](Images/deploying.png) +![Deploy](images/deploying.png) Upon completion, your screen will look similar to: -![Deploy Complete](Images/deploy_complete.png) +![Deploy Complete](images/deploy_complete.png) 21. Click `Go to Resource`. @@ -101,7 +101,7 @@ Upon completion, your screen will look similar to: From this resource page, note the `Public IP address` of your virtual machine, as you will use in the next step to connect to your VM instance. -![Public IP](Images/public_ip.png) +![Public IP](images/public_ip.png) ## Connect to your Windows on Arm VM @@ -117,7 +117,7 @@ In our example, we use the `Remote Desktop Connection` app as our RDP client, bu 4. Click connect. -![RDP #center](Images/rdp.png) +![RDP #center](images/rdp.png) You can now interact with the VM in the same way as you would a local desktop. @@ -128,4 +128,4 @@ If you have issues connecting to your instance, see this Microsoft article: Open `Control Panel` > `System`, and verify that `Device` > `System Type` identifies as an Arm-based processor. 
-![System #center](Images/system.png) +![System #center](images/system.png) diff --git a/content/learning-paths/cross-platform/woa_azure/Images/azure2.png b/content/learning-paths/cross-platform/woa_azure/images/azure2.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/azure2.png rename to content/learning-paths/cross-platform/woa_azure/images/azure2.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/click_create.png b/content/learning-paths/cross-platform/woa_azure/images/click_create.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/click_create.png rename to content/learning-paths/cross-platform/woa_azure/images/click_create.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/click_create_vm.png b/content/learning-paths/cross-platform/woa_azure/images/click_create_vm.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/click_create_vm.png rename to content/learning-paths/cross-platform/woa_azure/images/click_create_vm.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/config-vm-1.png b/content/learning-paths/cross-platform/woa_azure/images/config-vm-1.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/config-vm-1.png rename to content/learning-paths/cross-platform/woa_azure/images/config-vm-1.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/create.png b/content/learning-paths/cross-platform/woa_azure/images/create.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/create.png rename to content/learning-paths/cross-platform/woa_azure/images/create.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/deploy_complete.png b/content/learning-paths/cross-platform/woa_azure/images/deploy_complete.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/deploy_complete.png rename to content/learning-paths/cross-platform/woa_azure/images/deploy_complete.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/deploying.png b/content/learning-paths/cross-platform/woa_azure/images/deploying.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/deploying.png rename to content/learning-paths/cross-platform/woa_azure/images/deploying.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/image_type.png b/content/learning-paths/cross-platform/woa_azure/images/image_type.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/image_type.png rename to content/learning-paths/cross-platform/woa_azure/images/image_type.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/public_ip.png b/content/learning-paths/cross-platform/woa_azure/images/public_ip.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/public_ip.png rename to content/learning-paths/cross-platform/woa_azure/images/public_ip.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/rdp.png b/content/learning-paths/cross-platform/woa_azure/images/rdp.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/rdp.png rename to content/learning-paths/cross-platform/woa_azure/images/rdp.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/review_create.png 
b/content/learning-paths/cross-platform/woa_azure/images/review_create.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/review_create.png rename to content/learning-paths/cross-platform/woa_azure/images/review_create.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/search_and_click_vm.png b/content/learning-paths/cross-platform/woa_azure/images/search_and_click_vm.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/search_and_click_vm.png rename to content/learning-paths/cross-platform/woa_azure/images/search_and_click_vm.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/select_24h2.png b/content/learning-paths/cross-platform/woa_azure/images/select_24h2.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/select_24h2.png rename to content/learning-paths/cross-platform/woa_azure/images/select_24h2.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/select_win_arm_64.png b/content/learning-paths/cross-platform/woa_azure/images/select_win_arm_64.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/select_win_arm_64.png rename to content/learning-paths/cross-platform/woa_azure/images/select_win_arm_64.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/system.png b/content/learning-paths/cross-platform/woa_azure/images/system.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/system.png rename to content/learning-paths/cross-platform/woa_azure/images/system.png diff --git a/content/learning-paths/cross-platform/woa_azure/Images/vm_search.png b/content/learning-paths/cross-platform/woa_azure/images/vm_search.png similarity index 100% rename from content/learning-paths/cross-platform/woa_azure/Images/vm_search.png rename to content/learning-paths/cross-platform/woa_azure/images/vm_search.png diff --git a/content/learning-paths/embedded-and-microcontrollers/_index.md b/content/learning-paths/embedded-and-microcontrollers/_index.md index 3e971a13be..1008783e14 100644 --- a/content/learning-paths/embedded-and-microcontrollers/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/_index.md @@ -11,15 +11,15 @@ maintopic: true operatingsystems_filter: - Android: 1 - Baremetal: 29 -- Linux: 25 -- macOS: 5 +- Linux: 26 +- macOS: 6 - RTOS: 9 -- Windows: 3 +- Windows: 4 subjects_filter: - CI-CD: 5 - Containers and Virtualization: 6 - Embedded Linux: 3 -- Libraries: 2 +- Libraries: 3 - ML: 12 - Performance and Architecture: 21 - RTOS Fundamentals: 4 @@ -36,11 +36,12 @@ tools_software_languages_filter: - Arm Fast Models: 4 - Arm Virtual Hardware: 10 - AVH: 1 -- C: 1 +- C: 2 - C/C++: 1 - ChatGPT: 1 - Clang: 1 - CMSIS: 4 +- CMSIS-DSP: 1 - CMSIS-Toolbox: 3 - CNN: 1 - Coding: 26 @@ -59,6 +60,7 @@ tools_software_languages_filter: - GitLab: 1 - Himax SDK: 1 - IP Explorer: 4 +- Jupyter Notebook: 1 - K3s: 1 - Keil: 5 - Keil MDK: 3 @@ -70,7 +72,7 @@ tools_software_languages_filter: - Neon: 1 - Paddle: 1 - Porcupine: 1 -- Python: 4 +- Python: 5 - PyTorch: 2 - QEMU: 1 - Raspberry Pi: 5 @@ -86,6 +88,7 @@ tools_software_languages_filter: - TrustZone: 2 - TVMC: 1 - vcpkg: 1 +- VS Code: 1 - Yocto Linux: 1 - Zephyr: 1 weight: 5 diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md 
index 76bfd0c9a6..86fdf0c0a1 100644 --- a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md +++ b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/connecting_peripheral.md @@ -12,23 +12,23 @@ Follow the steps outline below to connect the AXI4 peripheral you created in the 1. Right-click on the empty space again in the diagram and choose “Create Port...” (Ctrl+K). Create 2 ports with the following settings and names: - ![Creating output port](Images/Picture15.png) + ![Creating output port](images/Picture15.png) *Figure 3.1. Creating output port* - ![Creating input port](Images/Picture16.png) + ![Creating input port](images/Picture16.png) *Figure 3.2. Creating input port* 2. Wire the “sw[3:0]” input to the “sw[3:0]” port of the “axi_gpio_asoc_0” block and the same for the “led[3:0]” output to the equivalent port of the block as shown in the diagram below. This connects them to external ports of the ZYNQ chip: - ![Vivado block diagram](Images/Picture17.png) + ![Vivado block diagram](images/Picture17.png) *Figure 3.3. Vivado block diagram* 3. Two IP blocks will be generated automatically. The “Processor System Reset” IP is used to generate reset signals for different peripherals. The “AXI Interconnect” IP here is used to interconnect AXI4-Lite Subordinate and AXI Manager. Select the “Address Editor” tab next to “Diagram” and change the “64K” to “4K”. Save all your progress. - ![Changing peripheral address settings](Images/Picture18.png) + ![Changing peripheral address settings](images/Picture18.png) *Figure 3.4. Changing peripheral address settings* @@ -40,26 +40,26 @@ Follow the steps outline below to connect the AXI4 peripheral you created in the 5. Right-click the “Generate Bitstream” option in the “Flow Navigator” on the left and select “Bitstream settings”. Click the three dots next to “tcl.pre”: - ![Changing bitstream settings](Images/Picture19.png) + ![Changing bitstream settings](images/Picture19.png) *Figure 3.5. Changing bitstream settings* 6. Select the “New Script” option, click the three dots next to the empty box, choose the “pins.tcl” file you created earlier and click “Ok” on all windows. Right-click the “Constraints” under the sources tab and select “Add sources”: - ![Adding sources](Images/Picture20.png) + ![Adding sources](images/Picture20.png) *Figure 3.6. Adding sources* 7. Select “Add or create constraints” and click “Next”. Select “Create File”, give any name to the file for example pin_constraints, and click “Finish”: - ![Creating a constraints file](Images/Picture21.png) + ![Creating a constraints file](images/Picture21.png) *Figure 3.7. Creating a constraints file* 8. Expand the “Constraints” folder within the “Sources” tab and double-click the file you just created to open it. Add the following constraints from [Digilent/Zybo-Z7-10-Pmod-VGA/blob/master/src/constraints/Zybo-Z7-Master.xdc](https://github.com/Digilent/Zybo-Z7-10-Pmod-VGA/blob/master/src/constraints/Zybo-Z7-Master.xdc), and save the file: - ![Editing constraints file](Images/Picture22.png) + ![Editing constraints file](images/Picture22.png) *Figure 3.8. 
Editing constraints file* diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/creating_peripheral.md b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/creating_peripheral.md index e15534e45d..1498a78246 100644 --- a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/creating_peripheral.md +++ b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/creating_peripheral.md @@ -12,19 +12,19 @@ The Xilinx Vivado tools provide a simplified way to create an AXI4 peripheral. Y 1. Start by clicking “Tools” -> “Create and Package New IP”. Click “Next” and choose the following option: - ![Creating AXI4 peripheral](Images/Picture7.png) + ![Creating AXI4 peripheral](images/Picture7.png) *Figure 2.1. Creating AXI4 peripheral* 2. Give the following name to the IP (you may keep the IP location path as provided by default): - ![Adding peripheral details](Images/Picture8.jpg) + ![Adding peripheral details](images/Picture8.jpg) *Figure 2.2. Adding peripheral details* 3. You need a Subordinate interface and four 32-bit registers for the switches and LEDs. - ![Peripheral settings for Subordinate interface](Images/Picture9.png) + ![Peripheral settings for Subordinate interface](images/Picture9.png) *Figure 2.3. Peripheral settings for Subordinate interface* @@ -36,7 +36,7 @@ These are the basic template files generated for an AXI-Lite peripheral. You can 5. Double-click to open the top-level Verilog file called “AUP_advanced_SoC_v1_0.v”: - ![Editing top-level Verilog file](Images/Picture10.png) + ![Editing top-level Verilog file](images/Picture10.png) *Figure 2.4. Editing top-level Verilog file* @@ -64,7 +64,7 @@ These are the basic template files generated for an AXI-Lite peripheral. You can ``` 8. Save the changes in the file (Ctrl+S). Next, expand and open the other Verilog file (AUP_advanced_SoC_v1_0_S00_AXI.v) shown below: - ![Edit the second Verilog file](Images/Picture11.png) + ![Edit the second Verilog file](images/Picture11.png) *Figure 2.5. Edit the second Verilog file* @@ -93,7 +93,7 @@ These are the basic template files generated for an AXI-Lite peripheral. You can 12. Save the changes in the file (Ctrl+S). Next, go to the “Package IP – AUP_advanced_SoC” tab, choose the “Customization Parameters” option on the left and click “Merge Changes from Customization Parameters Wizard” to update the IP package with the changes made in HDL files: - ![. Saving all the changes](Images/Picture12.png) + ![. Saving all the changes](images/Picture12.png) *Figure 2.6. Saving all the changes* @@ -127,7 +127,7 @@ These are the basic template files generated for an AXI-Lite peripheral. You can ``` 15. Then, click on “Settings” (under “Project Manager”) in the “Flow Navigator” menu on the left. Expand the “IP” section in the new window that appears and choose the “Repository” option. - ![Adding IP Repository](Images/Picture13.png) + ![Adding IP Repository](images/Picture13.png) *Figure 2.7. Adding IP Repository* @@ -135,7 +135,7 @@ These are the basic template files generated for an AXI-Lite peripheral. You can 17. Click “Run Connection Automation” and then click “OK” to connect the AXI-Lite Subordinate interface on GPIO peripheral to the AXI Manager interface on Arm processor. - ![Connect AXI-Lite Subordinate interface (Custom IP) to AXI Manager interface](Images/Picture14.png) + ![Connect AXI-Lite Subordinate interface (Custom IP) to AXI Manager interface](images/Picture14.png) *Figure 2.8. 
Connect AXI-Lite Subordinate interface (Custom IP) to AXI Manager interface* diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/generating_bitstream.md b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/generating_bitstream.md index 8aa3bd0f7c..cef827302b 100644 --- a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/generating_bitstream.md +++ b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/generating_bitstream.md @@ -12,56 +12,56 @@ In this section you will generate the bitstream for this project and write a C p 1. Under the “Sources” tab on the left, expand the “Design Sources” folder, right-click the design1.bd file, choose the “Create HDL Wrapper” and select all default options. - ![Creating HDL Wrapper](Images/Picture23.png) + ![Creating HDL Wrapper](images/Picture23.png) *Figure 3.1. Creating HDL Wrapper* 2. Save any other changes and click “Generate Bitstream” on the left and click “Ok” for all pop-ups. This process takes some time. Once the process is done, select “Open Hardware Manager”: - ![Generating bitstream and uploading to board](Images/Picture24.png) + ![Generating bitstream and uploading to board](images/Picture24.png) *Figure 3.2. Generating bitstream and uploading to board* 3. Connect the board and make sure the jumper (JP3) above the red LED on the Zybo board is in JTAG mode. Then, in Vivado, click “Auto Connect” in the Hardware Manager as shown below: - ![Connecting the board](Images/Picture25.png) + ![Connecting the board](images/Picture25.png) *Figure 3.3. Connecting the board* 4. Right-click on the board, select “Program Device” as shown below and click “Program” on the pop-up window. - ![Programming the board](Images/Picture26.png) + ![Programming the board](images/Picture26.png) *Figure 3.4. Programming the board* 5. Once the board is programmed, the green LED labeled “LD12” should light up on the board. Click “File” on the main menu bar and select “Export” -> “Export Hardware” and click “Next” on the pop-up window. Choose the following option on the next page: - ![Exporting hardware and bitstream file](Images/Picture27.png) + ![Exporting hardware and bitstream file](images/Picture27.png) *Figure 3.5. Exporting hardware and bitstream file* 6. Choose the “export to” location as the project folder and save the file. Then click “Finish”. Next, click “Tools” on the main menu bar and select “Launch Vitis IDE”. Choose the same project folder as your workspace. Click “File” -> “New” -> “Application Project”. - ![Creating a new application project](Images/Picture28.png) + ![Creating a new application project](images/Picture28.png) *Figure 3.6. Creating a new application project* 7. Select the “Create a new platform from hardware (XSA)” tab and click browse to select the XSA file you saved earlier: - ![Adding the XSA file](Images/Picture29.png) + ![Adding the XSA file](images/Picture29.png) *Figure 3.7. Adding the XSA file* 8. Click next and give a name (e.g. led_system) to the application project. Click “Next” until you reach the following page and choose “Empty Application(C)” and click “Finish”: - ![Creating an empty C Application](Images/Picture30.png) + ![Creating an empty C Application](images/Picture30.png) *Figure 3.8. Creating an empty C Application* 9. Then right-click the “src” folder within the application project you created and add a new file called “main.c”. - ![Adding a main.c file](Images/Picture31.png) + ![Adding a main.c file](images/Picture31.png) *Figure 3.9. 
Adding a main.c file* @@ -81,7 +81,7 @@ In this section you will generate the bitstream for this project and write a C p 12. Right-click the application project in the explorer tab, select “Build Project” and ensure that the build is successful. Then right click again and select “Run As” and then “1 Launch Hardware” to upload everything to the board. - ![Running the program on the board.](Images/Picture32.png) + ![Running the program on the board.](images/Picture32.png) *Figure 3.10. Running the program on the board.* diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture10.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture10.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture10.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture10.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture11.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture11.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture11.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture11.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture12.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture12.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture12.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture12.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture13.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture13.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture13.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture13.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture14.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture14.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture14.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture14.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture15.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture15.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture15.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture15.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture16.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture16.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture16.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture16.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture17.png 
b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture17.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture17.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture17.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture18.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture18.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture18.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture18.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture19.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture19.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture19.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture19.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture2.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture2.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture2.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture2.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture20.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture20.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture20.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture20.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture21.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture21.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture21.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture21.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture22.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture22.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture22.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture22.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture23.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture23.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture23.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture23.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture24.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture24.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture24.png rename to 
content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture24.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture25.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture25.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture25.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture25.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture26.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture26.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture26.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture26.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture27.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture27.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture27.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture27.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture28.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture28.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture28.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture28.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture29.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture29.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture29.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture29.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture3.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture3.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture3.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture3.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture30.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture30.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture30.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture30.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture31.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture31.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture31.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture31.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture32.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture32.png similarity index 100% 
rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture32.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture32.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture4.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture4.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture4.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture4.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture5.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture5.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture5.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture5.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture6.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture6.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture6.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture6.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture7.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture7.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture7.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture7.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture8.jpg b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture8.jpg similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture8.jpg rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture8.jpg diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture9.png b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture9.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/advanced_soc/Images/Picture9.png rename to content/learning-paths/embedded-and-microcontrollers/advanced_soc/images/Picture9.png diff --git a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/setup.md b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/setup.md index 84513f6fdb..8ba3dd68ef 100644 --- a/content/learning-paths/embedded-and-microcontrollers/advanced_soc/setup.md +++ b/content/learning-paths/embedded-and-microcontrollers/advanced_soc/setup.md @@ -16,31 +16,31 @@ After installation is complete, follow the steps outlined below to setup a works 2. Open Xilinx Vivado and create a new project in your workspace as shown below: - ![Creating a project](Images/Picture2.png) + ![Creating a project](images/Picture2.png) *Figure 1.1. Creating a project* 3. Give it a meaningful name and click "Next". Select RTL Project and click “Next”. You will not be adding any sources or constraints so click “Next” for the next two pages. 
In the next page, click the “Boards” tab and search for “Zybo” in the search bar and click the download button next to “Zybo Z7-10” if the option is available. Then select it and click “Next”. Do not click on the hyperlink but click on the empty area next to “Zybo Z7-10”. - ![Board Selection](Images/Picture3.png) + ![Board Selection](images/Picture3.png) *Figure 1.2. Board Selection* 4. Click “Finish”. In the “Flow Navigator” menu on the left, click “Create Block Design” under “IP Integrator”: - ![Creating a new block design](Images/Picture4.png) + ![Creating a new block design](images/Picture4.png) *Figure 1.3. Creating a new block design* 5. Choose a design name or use the default one and click “Ok”. Within the empty “Diagram” box on the right-hand side, right-click and select “Add IP”. Enter “Zynq” in the search box and choose “ZYNQ7 Processing System”. Click the “Run Block Automation” option that has now appeared. - ![Running Block Automation](Images/Picture5.png) + ![Running Block Automation](images/Picture5.png) *Figure 1.4. Running Block Automation* 6. Click “Ok” on the pop-up window. - ![Run Block Automation default settings](Images/Picture6.png) + ![Run Block Automation default settings](images/Picture6.png) Figure 1.5. Run Block Automation default settings* diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/AddSource.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/AddSource.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/AddSource.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/AddSource.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/AddSource2.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/AddSource2.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/AddSource2.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/AddSource2.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/Build.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/Build.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/Build.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/Build.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack2.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack2.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack2.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack2.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack3.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack3.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/CallStack3.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/CallStack3.png diff --git 
a/content/learning-paths/embedded-and-microcontrollers/asm/Images/Debug.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/Debug.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/Debug.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/Debug.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/Flash.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/Flash.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/Flash.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/Flash.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/NewKeilProject.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/NewKeilProject.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/NewKeilProject.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/NewKeilProject.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/ProjectExplorer.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/ProjectExplorer.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/ProjectExplorer.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/ProjectExplorer.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/SelectDevice1.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/SelectDevice1.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/SelectDevice1.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/SelectDevice1.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/SelectDevice2.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/SelectDevice2.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/SelectDevice2.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/SelectDevice2.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/SoftwareComponents.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/SoftwareComponents.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/SoftwareComponents.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/SoftwareComponents.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions2.png b/content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions2.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions2.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions2.png diff --git a/content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions3.png 
b/content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions3.png similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/asm/Images/TargetOptions3.png rename to content/learning-paths/embedded-and-microcontrollers/asm/images/TargetOptions3.png diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md new file mode 100644 index 0000000000..b149fb3eb9 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md @@ -0,0 +1,71 @@ +--- +title: Getting Started with CMSIS-DSP Using Python + +minutes_to_complete: 30 + +who_is_this_for: Developers writing DSP/AI software + +learning_objectives: + - Understand how to use the CMSIS-DSP Python package + - Understand how the Python implementation maps to the C implementation + - Develop a complex application using CMSIS-DSP + +prerequisites: + - Some familiarity with DSP programming + - Some familiarity with Python programming + - Knowledge of C + - Some familiarity with CMSIS-DSP + - Python installed on your system + +author: Christophe Favergeon + +### Tags +skilllevels: Advanced +subjects: Libraries +armips: + - Cortex-M + - Cortex-A +tools_software_languages: + - VS Code + - CMSIS-DSP + - Python + - C + - Jupyter Notebook +operatingsystems: + - Linux + - Windows + - macOS + + + + + +further_reading: + - resource: + title: Biquad filters with CMSIS-DSP Python package + link: https://developer.arm.com/documentation/102463/latest/ + type: documentation + - resource: + title: CMSIS-DSP library + link: https://github.com/ARM-software/CMSIS-DSP + type: Open-source project + - resource: + title: CMSIS-DSP python package + link: https://pypi.org/project/cmsisdsp/ + type: Open-source project + - resource: + title: CMSIS-DSP Python package examples and tests + link: https://github.com/ARM-software/CMSIS-DSP/tree/main/PythonWrapper/examples + type: Open-source project + - resource: + title: CMSIS-Stream + link: https://github.com/ARM-software/CMSIS-Stream + type: Open-source project + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
+--- diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/_next-steps.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_next-steps.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/_next-steps.md rename to content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_next-steps.md diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png new file mode 100644 index 0000000000..7d6630778b Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png new file mode 100644 index 0000000000..4bb3e0fd70 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png new file mode 100644 index 0000000000..e36e52d1a1 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md new file mode 100644 index 0000000000..7efc9b88da --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md @@ -0,0 +1,24 @@ +--- +title: What is the CMSIS-DSP Python package ? +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## What is CMSIS-DSP ? + +CMSIS-DSP is a general-purpose compute library with a focus on DSP. It was initially developed for Cortex-M processors and has recently been upgraded to also support Cortex-A. + +On each processor, CMSIS-DSP is optimized for the architecture: DSP extensions on M4 and M7; Helium on M55 and M85; Neon on A55, etc. + +## What is the CMSIS-DSP Python package ? + +The CMSIS-DSP Python package is a Python API for CMSIS-DSP. Its goal is to make it easier to develop a C solution using CMSIS-DSP by decreasing the gap between a design environment like Python and the final C implementation. + +For this reason, the Python API is as close as possible to the C one. + +Fixed-point arithmetic is rarely provided by Python packages, which generally focus on floating-point operations. The CMSIS-DSP Python package provides the same fixed-point arithmetic functions as the C version: Q31, Q15 and Q7. The package also provides floating-point functions and will also support half-precision floats in the future, like the C API. + +Finally, the CMSIS-DSP Python package is compatible with NumPy and can be used with all other scientific and AI Python packages such as SciPy and PyTorch. 
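+
+As a quick illustration of how closely the Python API mirrors the C API, here is a minimal sketch that adds two short Q15 vectors (an illustration only: the sample values are arbitrary, and it assumes the packages installed in the next section). The function name `arm_add_q15` is the same one you would call from C.
+
+```python
+import numpy as np
+import cmsisdsp as dsp
+import cmsisdsp.fixedpoint as fix
+
+# Convert two small float signals to Q15 fixed-point
+a = fix.toQ15(np.array([0.10, 0.20, -0.30], dtype=np.float32))
+b = fix.toQ15(np.array([0.05, -0.10, 0.25], dtype=np.float32))
+
+# Same function name as in the C API
+c = dsp.arm_add_q15(a, b)
+
+# Convert back to float to inspect the result
+print(fix.Q15toF32(c))
+```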
+ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md new file mode 100644 index 0000000000..a06c50a31a --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md @@ -0,0 +1,76 @@ +--- +title: Install the Python packages +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Installing the Python packages +The application you will develop with CMSIS-DSP requires a few additional Python packages besides CMSIS-DSP. These need to be installed before you start writing code. + +Activate the Python environment you have chosen. + +The first package to install is CMSIS-DSP: + +```bash +pip install cmsisdsp +``` +It will also install `NumPy`, which is a dependency of the CMSIS-DSP Python package. + +You'll be working with a Jupyter notebook, so the jupyter package must also be installed: + +```bash +pip install jupyter +``` + +In the Jupyter notebook, you'll be using widgets to play sound, so you'll need to install some additional Jupyter widgets. + +```bash +pip install ipywidgets +``` + +Finally, you'll need packages to read sound files and display plots: + + +```bash +pip install soundfile +pip install matplotlib +``` + +you can now launch the Jupyter notebook: + +```bash +jupyter notebook +``` +Create a new Jupyter notebook by clicking `new` and selecting `Python 3 (ipykernel)`. + +The new notebook will be named `Untitled`. Rename it to something more descriptive. + +You can now import all the required packages. + +Type the following Python code into your notebook and run the cell (shift-enter). +All the Python code in this learning path is intended to be executed in the same Jupyter notebook. + +```python +import cmsisdsp as dsp +import cmsisdsp.fixedpoint as fix +import numpy as np +from numpy.lib.stride_tricks import sliding_window_view + +# Package for plotting +import matplotlib.pyplot as plt + +# Package to display audio widgets in the notebook and upload sound files +import ipywidgets +from IPython.display import display,Audio + +# To convert a sound file to a NumPy array +import io +import soundfile as sf + +# To load test patterns from the Arm Virtual Hardware Echo Canceller dem +from urllib.request import urlopen +``` + +You're now ready to move on to the next steps. \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md new file mode 100644 index 0000000000..1f5abe6aa3 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md @@ -0,0 +1,87 @@ +--- +title: Load an audio file +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Load an audio file + +Load an audio file from one of the Arm demo repositories on GitHub. + + +```python +test_pattern_url="https://github.com/ARM-software/VHT-SystemModeling/blob/main/EchoCanceller/sounds/yesno.wav?raw=true" +f = urlopen(test_pattern_url) +filedata = f.read() +``` + +You can now play and listen to the audio: +```python +audio=Audio(data=filedata,autoplay=False) +audio +``` + +An audio widget will appear in your Jupyter notebook. It will look like this: + +![audio widget alt-text#center](audiowidget.png "Figure 1. Audio widget") + +You can use it to listen to the audio. 
+ +You'll hear a sequence of the words "yes" and "no", with some noise between them. +The goal of this learning path is to design an algorithm to remove the noise. + + +Next, convert the audio into a NumPy array so that it can be processed using CMSIS-DSP: + +```python +data, samplerate = sf.read(io.BytesIO(filedata)) +if len(data.shape)>1: + data=data[:,0] +data = data.astype(np.float32) +data=data/np.max(np.abs(data)) +dataQ15 = fix.toQ15(data) +``` + +The code above does the following: +- Converts the audio into a NumPy array +- If the audio is stereo, only one channel is kept +- Normalizes the audio to ensure no value exceeds 1 +- Converts the audio to Q15 fixed-point representation to enable the use of CMSIS-DSP fixed-point functions + +Now, plot the audio waveform: + +```python +plt.plot(data) +plt.show() +``` + +You'll get the following output: + +![audio signal alt-text#center](signal.png "Figure 2. Audio signal") + +In the picture, you can see a sequence of words. Between the words, the signal is not zero: there is some noise. + +In a real application, you don't wait for the entire signal to be received. The signal is continuous. The samples are processed as they are received. Processing can either be sample-based or block-based. For this learning path, the processing will be block-based. + +Before you can move to the next step, this signal must be split into blocks. The processing will occur on small blocks of samples of a given duration. + + + +```python +winDuration=30e-3/6 +winOverlap=15e-3/6 + +winLength=int(np.floor(samplerate*winDuration)) +winOverlap=int(np.floor(samplerate*winOverlap)) +slices=sliding_window_view(data,winLength)[::winOverlap,:] +slices_q15=sliding_window_view(dataQ15,winLength)[::winOverlap,:] +``` + +Refer to the [NumPy documentation](https://numpy.org/doc/stable/reference/generated/numpy.lib.stride_tricks.sliding_window_view.html) for details about `sliding_window_view`. It's not the most efficient function, but it is sufficient for this tutorial. + +The signal is split into overlapping blocks: each block reuses half of the samples from the previous block as defined by the `winOverlap` variable. + +You are now ready to move on to the next step: you have an audio signal that has been split into overlapping blocks, and processing will occur on those blocks. + diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md new file mode 100644 index 0000000000..d85a8dc10a --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md @@ -0,0 +1,125 @@ +--- +title: Write a simple VAD +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Write a simple voice activity detection + +To remove the noise between speech segments, you need to detect when voice is present. + +Voice activity detection can be complex, but for this learning path, you'll implement a very simple and naive approach based on energy. The idea is that if the environment isn't too noisy, speech should have more energy than the noise. + +The detection will rely on a comparison with a threshold that must be manually tuned. + +You'll first implement a version of the voice activity detection (VAD) with NumPy, which will serve as a reference. + +Then you'll implement the same version using CMSIS-DSP with the Q15 fixed-point format. 
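+
+Since the threshold has to be tuned by hand, one practical way to pick a starting value is to look at how the per-block energy is distributed. The sketch below reuses the `slices` array and the imports from the previous steps (the bin count and the small epsilon are arbitrary choices for illustration):
+
+```python
+# Energy in dB of each block, ignoring the constant (mean) component
+energies = [10 * np.log10(np.sum((w - np.mean(w)) ** 2) + 1e-12) for w in slices]
+
+plt.hist(energies, bins=40)
+plt.xlabel("Block energy (dB)")
+plt.ylabel("Number of blocks")
+plt.show()
+```
+
+Speech and noise blocks should appear as two groups in the histogram, and a threshold that falls between them is a reasonable starting point.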
+ +### NumPy VAD + +First, you need to compute the energy of the signal within a block of samples. You'll ignore any constant component and focus only on the varying part of the signal: + +```python +# Energy of the window +def signal_energy(window): + w = window - np.mean(window) + return(10*np.log10(np.sum(window * window))) +``` +Then, compare the energy to a threshold to determine whether the block of audio is speech or noise: + +```python +def signal_vad(window): + if signal_energy(window)>-11: + return(1) + else: + return(0) +``` + +The threshold is hard-coded. It's not a very clean solution, but it's sufficient for a tutorial. + +When using such a detector, you'll quickly find that it is not sufficient. You'll need another pass to clean up the detection signal. + +```python +def clean_vad(v): + v = np.hstack([[0],v,[0]]) + # Remove isolated peak + vmin=[np.min(l) for l in sliding_window_view(v,3)] + vmin = np.hstack([[0,0],vmin,[0]]) + # Remove isolated hole + vmax=[np.max(l) for l in sliding_window_view(vmin,4)] + return(vmax) +``` + +Now you can apply this algorithm to the audio signal and plot the VAD detection over it to see if it's working: + +```python +_,ax=plt.subplots(1,1) +cleaned=clean_vad([signal_vad(w) for w in slices]) +vad = np.array([[w]*(winLength-winOverlap) for w in cleaned]).flatten() +ax.plot(data) +ax.plot(vad) +``` +![vad alt-text#center](vad.png "Figure 3. VAD") + +The reference implementation works. You can now implement the same version using CMSIS-DSP. + +### CMSIS-DSP Q15 VAD + +First, you need to compute the signal energy from audio in Q15 format using CMSIS-DSP. + +If you look at the CMSIS-DSP documentation, you'll see that the power and log functions don't produce results in Q15 format. Tracking the fixed-point format throughout all lines of an algorithm can be challenging. + +For this tutorial, instead of trying to determine the exact fixed-point format of the output and applying the necessary shift to adjust the output's fixed-point format, we'll simply tune the threshold of the detection function. + +```python +def signal_energy_q15(window): + mean=dsp.arm_mean_q15(window) + # Subtracting the mean won't cause saturation + # So we use the CMSIS-DSP negate function on an array containing a single sample. + neg_mean=dsp.arm_negate_q15([mean])[0] + window=dsp.arm_offset_q15(window,neg_mean) + energy=dsp.arm_power_q15(window) + # Energy is not in Q15 format (refer to the CMSIS-DSP documentation). + energy=dsp.ssat(energy>>20,16) + dB=dsp.arm_vlog_q15([energy]) + # The output of the `vlog` is not in q15 + # The multiplication by 10 is missing compared to the NumPy + # reference implementation. + # The result of this function is not equivalent to the float implementation due to different + # formats used in intermediate computations. + # As a consequence, a different threshold must be used to compensate for these differences. + return(dB[0]) +``` + +The comparison function is very similar to the NumPy reference, but the threshold is different: + +```python +def signal_vad_q15(window): + # The threshold is not directly comparable to the float implementation + # due to the different intermediate formats used in the fixed-point implementation. + if signal_energy_q15(window)>fix.toQ15(-0.38): + return(1) + else: + return(0) +``` + +Note that in a C code, you would use the output of `fix.toQ15(-0.38)`. + +`fix.toQ15` is a utility of the Python package to convert float to fixed-point. It is not available in the CMSIS-DSP C implementation. 
+CMSIS-DSP C has functions like `arm_float_to_q15` which work on arrays and are meant to be used at runtime. If you need a precomputed constant, you can use a utility function like `fix.toQ15` and use the resulting value in the code. + +The clean VAD function is the same for both the NumPy and Q15 versions. + +Now you can check whether the Q15 version is working by plotting the signal and the output of the Q15 VAD algorithm. + +```python +_,ax=plt.subplots(1,1) +cleaned=clean_vad([signal_vad_q15(w) for w in slices_q15]) +vad_q15 = np.array([[w]*winOverlap for w in cleaned]).flatten() +ax.plot(data) +ax.plot(vad_q15) + +``` \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md new file mode 100644 index 0000000000..e36593d3e0 --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md @@ -0,0 +1,390 @@ +--- +title: Write a noise suppression algorithm +weight: 6 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Write a noise suppression algorithm + +### Overlapping windows + +The blocks of audio samples you created in the previous steps will be multiplied by a Hanning window function, which looks like this: + +```python +window=dsp.arm_hanning_f32(winLength) +plt.plot(window) +plt.show() +``` + +![hanning alt-text#center](hanning.png "Figure 4. Hanning Window") + + +The slices we created are overlapping. By applying a Hanning window function and summing the slices, you can reconstruct the original signal. + +Indeed, summing two Hanning windows shifted by half the width of the sample block gives: +![summed hanning alt-text#center](sumhanning.png "Figure 5. Summed Hanning Window") + +As result, if you multiply the overlapping blocks of samples by Hanning windows and sum the result, you can reconstruct the original signal: + + +```python +offsets = range(0, len(data),winOverlap) +offsets=offsets[0:len(slices)] +res=np.zeros(len(data)) +i=0 +for n in offsets: + res[n:n+winLength] += slices[i]*window + i=i+1 +plt.plot(res) +plt.show() +``` + +You can now listen to the recombined signal: +```python +audio2=Audio(data=res,rate=samplerate,autoplay=False) +audio2 +``` + + +This means you can process each slice independently and then recombine them at the end to produce the output signal. + +### Principle of the noise reduction + +The algorithm works in the spectral domain, so a FFT will be used. +When there is no speech (as detected with the VAD), the noise level in each frequency band is estimated. + +When speech is detected, the noise estimate is used. + +Noise filtering in each band uses a simplified Wiener filter. + +A gain is applied to the signal, defined as follow: + +$$H(f) = \frac{S(f)}{S(f) + N(f)}$$ + +- \(S(f)\) is the speech spectrum. +- \(N(f)\) is the noise spectrum. + +$$H(f) = \frac{1}{1 + \frac{N(f)}{S(f)}}$$ + +For this tutorial, we assume a high SNR. The VAD relies on this assumption: the signal energy is sufficient to detect speech. 
+With a high signal-to-noise ratio, the transfer function can be approximated as: + +$$H(f) \approx 1 - \frac{N(f)}{S(f)}$$ + +You don't have access to \(S(f)\), only to the measured \(S(f) + N(f)\) which will be used under the assumption that the noise is small, making the approximation acceptable: + +$$H(f) \approx 1 - \frac{N(f)}{S(f) + N(f)}$$ + + +with \(S(f) + N(f) = E(f)\) + +- \(E(f)\) is the observed energy in a frequency band. + +It can be rewritten as: + +$$H(f) \approx \frac{E(f) - N(f)}{E(f)}$$ + +- \(N(f)\) is estimated when there is no speech. + +In the Python code below, you’ll see this formula implemented as: + +```python +scaling = (energy - self._noise)/energy +``` + +(Don’t evaluate this Python code in your Jupyter notebook—it will be run later as part of the full implementation.) + +### NoiseSuppression and NoiseSuppressionReference classes + +The entire algorithm will be packaged as a Python class. +The class functions are explained below using Python code that should not be evaluated in the Jupyter notebook. + +You should only evaluate the full class definition in the Jupyter notebook—not the code snippets used for explanation. + + +#### NoiseSuppression constructor + +`NoiseSuppression` is a shared class used by both the float reference implementation and the Q15 version. + +```python +class NoiseSuppression(): + def __init__(self,slices): + self._windowLength=len(slices[0]) + self._fftLen,self._fftShift=fft_length(self._windowLength) + + self._padding_left=(self._fftLen - self._windowLength)//2 + self._padding_right=self._fftLen- self._windowLength-self._padding_left + + self._signal=[] + self._slices=slices + self._window=None +``` + +The constructor for `NoiseSuppression`: +- Uses the audio slices as input +- Computes the FFT length that can be used for each slice +- Computes the padding needed for the FFT + +The FFT length must be a power of 2. The slice length is not necessarily a power of 2. The constructor computes the closest usable power of 2. The audio slices are padded with zeros on both sides to match the required FFT length. + +#### NoiseSuppressionReference constructor + +```python +class NoiseSuppressionReference(NoiseSuppression): + def __init__(self,slices): + NoiseSuppression.__init__(self,slices) + + # Compute the vad signal + self._vad=clean_vad([signal_vad(w) for w in slices]) + self._noise=np.zeros(self._fftLen) + # The Hann window + self._window=dsp.arm_hanning_f32(self._windowLength) +``` + +The constructor for `NoiseSuppressionReference`: +- Uses the audio slices as input +- Call the constructor for `NoiseSuppression` +- Computes the VAD signal for the full audio signal +- Compute the Hanning window + + +#### subnoise +```python +def subnoise(self,v): + # This is a Wiener estimate. + energy = v * np.conj(v) + 1e-6 + + scaling = (energy - self._noise)/energy + scaling[scaling<0] = 0 + + return(v * scaling) +``` + +This function computes the approximate Wiener gain. +If the gain is negative, it is set to 0. +A small value is added to the energy to avoid division by zero. +This function is applied to all frequency bands of the FFT. The `v` argument is a vector. + +#### remove_noise +```python +def remove_noise(self,w): + # We pad the signal with zeros. This assumes the padding is divisible by 2. + # A more robust implementation would also handle the odd-length case. + # The FFT length is greater than the window length and must be a power of 2. 
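+    # window_and_pad applies the Hanning window to the slice and zero-pads it to the FFT length.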
+ sig=self.window_and_pad(w) + + # FFT + fft=np.fft.fft(sig) + # Noise suppression + fft = self.subnoise(fft) + # IFFT + res=np.fft.ifft(fft) + # We assume the result should be real, so we ignore the imaginary part. + res=np.real(res) + # We remove the padding. + res=self.remove_padding(res) + return(res) +``` + +The function computes the FFT (with padding) and reduces noise in the frequency bands using the approximate Wiener gain. + +#### estimate_noise +```python + def estimate_noise(self,w): + # Compute the padded signal. + sig=self.window_and_pad(w) + fft=np.fft.fft(sig) + + # Estimate the noise energy. + self._noise = np.abs(fft)*np.abs(fft) + + # Remove the noise. + fft = self.subnoise(fft) + + # Perform the IFFT, assuming the result is real, so we ignore the imaginary part. + res=np.fft.ifft(fft) + res=np.real(res) + res=self.remove_padding(res) + return(res) +``` + +This function is very similar to the previous one. +It's used when no speech detected. +It updates the noise estimate before reducing the noise. + + +#### nr + +```python +def nr(self): + for (w,v) in zip(self._slices,self._vad): + result=None + if v==1: + # If voice is detected, we only remove the noise. + result=self.remove_noise(w) + else: + # If no voice is detected, we update the noise estimate. + result=self.estimate_noise(w) + self._signal.append(result) +``` + +The main function: it removes noise from each slice. +If a slice does not contain speech, the noise estimate is updated before reducing noise in each frequency band. + +#### overlap_and_add + +The filtered slices are recombined: + +```python +def overlap_and_add(self): + offsets = range(0, len(self._signal)*winOverlap,winOverlap) + offsets=offsets[0:len(self._signal)] + res=np.zeros(len(data)) + i=0 + for n in offsets: + res[n:n+winLength]+=self._signal[i] + i=i+1 + return(res) +``` + +### The final code for the Python class + +You can evaluate this code in your Jupyter notebook. + +```python +def fft_length(length): + result=2 + fft_shift=1 + while result < length: + result = 2*result + fft_shift = fft_shift + 1 + return(result,fft_shift) + +class NoiseSuppression(): + def __init__(self,slices): + self._windowLength=len(slices[0]) + self._fftLen,self._fftShift=fft_length(self._windowLength) + + self._padding_left=(self._fftLen - self._windowLength)//2 + self._padding_right=self._fftLen- self._windowLength-self._padding_left + + self._signal=[] + self._slices=slices + self._window=None + + def window_and_pad(self,w): + if w.dtype==np.int32: + w=dsp.arm_mult_q31(w,self._window) + elif w.dtype==np.int16: + w=dsp.arm_mult_q15(w,self._window) + else: + w = w*self._window + sig=np.hstack([np.zeros(self._padding_left,dtype=w.dtype),w,np.zeros(self._padding_right,dtype=w.dtype)]) + return(sig) + + def remove_padding(self,w): + return(w[self._padding_left:self._padding_left+self._windowLength]) + +class NoiseSuppressionReference(NoiseSuppression): + def __init__(self,slices): + # In a better version this could be computed from the signal length by taking the + # smaller power of two greater than the signal length. 
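+        # The base class computes the FFT length and the zero-padding needed for each slice.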
+ NoiseSuppression.__init__(self,slices) + + # Compute the vad signal + self._vad=clean_vad([signal_vad(w) for w in slices]) + self._noise=np.zeros(self._fftLen) + # The Hann window + self._window=dsp.arm_hanning_f32(self._windowLength) + + # Subtract the noise + def subnoise(self,v): + # This is a Wiener estimate + energy = v * np.conj(v) + 1e-6 + + scaling = (energy - self._noise)/energy + scaling[scaling<0] = 0 + + return(v * scaling) + + def remove_noise(self,w): + # We pad the signal with zero. It assumes that the padding can be divided by 2. + # In a better implementation we would manage also the odd case. + # The padding is required because the FFT has a length which is greater than the length of + # the window + sig=self.window_and_pad(w) + + # FFT + fft=np.fft.fft(sig) + # Noise suppression + fft = self.subnoise(fft) + # IFFT + res=np.fft.ifft(fft) + # We assume the result should be real so we just ignore the imaginary part + res=np.real(res) + # We remove the padding + res=self.remove_padding(res) + return(res) + + + + def estimate_noise(self,w): + # Compute the padded signal + sig=self.window_and_pad(w) + fft=np.fft.fft(sig) + + # Estimate the noise energy + self._noise = np.abs(fft)*np.abs(fft) + + # Remove the noise + fft = self.subnoise(fft) + + # IFFT and we assume the result is real so we ignore imaginary part + res=np.fft.ifft(fft) + res=np.real(res) + res=self.remove_padding(res) + return(res) + + # Process all the windows using the VAD detection + def nr(self): + for (w,v) in zip(self._slices,self._vad): + result=None + if v==1: + # If voice detected, we only remove the noise + result=self.remove_noise(w) + else: + # If no voice detected, we update the noise estimate + result=self.estimate_noise(w) + self._signal.append(result) + + # Overlap and add to rebuild the signal + def overlap_and_add(self): + offsets = range(0, len(self._signal)*winOverlap,winOverlap) + offsets=offsets[0:len(self._signal)] + res=np.zeros(len(data)) + i=0 + for n in offsets: + res[n:n+winLength]+=self._signal[i] + i=i+1 + return(res) +``` +You can now test this algorithm on the original signal: + +```python +n=NoiseSuppressionReference(slices) +n.nr() +cleaned=n.overlap_and_add() +plt.plot(cleaned) +plt.show() +``` + +![cleaned alt-text#center](cleaned.png "Figure 6. Cleaned signal") + +You can now listen to the result: + +```python +audioRef=Audio(data=cleaned,rate=samplerate,autoplay=False) +audioRef +``` \ No newline at end of file diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md new file mode 100644 index 0000000000..10e9f97fcc --- /dev/null +++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md @@ -0,0 +1,414 @@ +--- +title: Write the CMSIS-DSP Q15 implementation +weight: 7 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Write the CMSIS-DSP Q15 implementation + +### Slicing + +The CMSIS-DSP implementation is very similar to the reference implementation you just tested. + +With the following code, you’ll check that recombining the windowed block samples works correctly. +Since the Q15 representation is less accurate than float and can saturate, it’s a good idea to verify the recombination step. + +The Hanning window is converted to Q15 format. Then, the slices are multiplied by the Q15 Hanning window and summed. +The final result is converted to float. 
+ +```python +offsets = range(0, len(data),winOverlap) +offsets=offsets[0:len(slices_q15)] +res=np.zeros(len(data)) +window_q15=fix.toQ15(window) +i=0 +for n in offsets: + w = dsp.arm_mult_q15(slices_q15[i],window_q15) + res[n:n+winLength] = dsp.arm_add_q15(res[n:n+winLength],w) + i=i+1 +res_q15=fix.Q15toF32(res) +plt.plot(res_q15) +plt.show() +``` +You can now listen to the audio to check the result: +```python +audio4=Audio(data=res_q15,rate=samplerate,autoplay=False) +audio4 +``` +### Utilities + +CMSIS-DSP does not have a complex data type. Complex numbers are represented as a float array with alternating real and imaginary parts: real, imaginary, real, imaginary, and so on. + +You’ll need functions to convert to and from NumPy complex arrays. + +```python +def imToReal1D(a): + ar=np.zeros(np.array(a.shape) * 2) + ar[0::2]=a.real + ar[1::2]=a.imag + return(ar) + +def realToIm1D(ar): + return(ar[0::2] + 1j * ar[1::2]) +``` + +## The final Q15 implementation + +Try the final implementation first, and then we’ll analyze the differences from the reference implementation. + +```python +class NoiseSuppressionQ15(NoiseSuppression): + def __init__(self,slices): + NoiseSuppression.__init__(self,slices) + + # VAD signal. + self._vad= clean_vad(np.array([signal_vad_q15(w) for w in slices])) + self._noise=np.zeros(self._fftLen,dtype=np.int32) + # Q15 version of the Hanning window. + self._window=fix.toQ15(dsp.arm_hanning_f32(self._windowLength)) + # CFFT Q15 instance. + self._cfftQ15=dsp.arm_cfft_instance_q15() + status=dsp.arm_cfft_init_q15(self._cfftQ15,self._fftLen) + + self._noise_status = -1 + self._noise_max = 0x7FFF + + + # Subtract the noise. + def subnoise(self,v,status,the_max): + + # We cannot compute the energy in Q15, because many values would otherwise be 0 + # The noise signal is too small for its energy to be representable in Q15. + # So we convert to Q31 and perform noise subtraction in Q31. + vq31 = dsp.arm_q15_to_q31(v) + energy = dsp.arm_cmplx_mag_squared_q31(vq31) + + # The energy for the signal and noise were computed on a rescaled signal. + # So, we remove the scaling from the values before computing the ratio (energy - noise) / energy. + # `status == 0` means the signal has been rescaled. + if status==0: + the_max_q31=dsp.arm_q15_to_q31([the_max])[0] + energy=dsp.arm_scale_q31(energy,the_max_q31,0) + energy=dsp.arm_scale_q31(energy,the_max_q31,0) + + noise = self._noise + # `status == 0` means the noise has been rescaled. + if self._noise_status==0: + the_max_q31=dsp.arm_q15_to_q31([self._noise_max])[0] + noise=dsp.arm_scale_q31(noise,the_max_q31,0) + noise=dsp.arm_scale_q31(noise,the_max_q31,0) + + + temp = dsp.arm_sub_q31(energy , noise) + temp[temp<0]=0 + + scalingQ31 = np.zeros(len(temp),dtype=np.int32) + shift = np.zeros(len(temp),dtype=np.int32) + + # The scaling factor `(energy - noise) / energy` is computed. + k=0 + # We assume that `|energy - noise|<=energy` + # Otherwise, we set scaling to `1` + # If energy is `0`, we also set scaling to `1`. + # When `a == b`, `shiftVal` is equal to `1` because `1` (as the result of the division operator) + # is represented as `0x40000000` with a shift of `1` instead of `0x7FFFFFFF` for output of division + # We handle this case separately + for a,b in zip(temp,energy): + quotient=0x7FFFFFFF + shiftVal=0 + if b!=0 and a!=b: + # We compute the quotient. 
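+                # arm_divide_q31 returns a status, a Q31 quotient, and a shift (exponent) value.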
+ status,quotient,shiftVal = dsp.arm_divide_q31(a,b) + if shiftVal > 0: + quotient=0x7FFFFFFF + shiftVal = 0 + + scalingQ31[k] = quotient + shift[k] = shiftVal + + k = k + 1 + + + res=dsp.arm_cmplx_mult_real_q31(vq31,scalingQ31) + resQ15 = dsp.arm_q31_to_q15(res) + + return(resQ15) + + # To achieve maximum accuracy with the Q15 FFT, the signal is rescaled before computing the FFT + # It is divided by its maximum value. + def rescale(self,w): + the_max,index=dsp.arm_absmax_q15(w) + + quotient=0x7FFF + the_shift=0 + status = -1 + if the_max != 0: + status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max) + if status == 0: + w=dsp.arm_scale_q15(w,quotient,the_shift) + return(w,status,the_max) + + # The scaling is removed after the IFFT is computed. + def undo_scale(self,w,the_max): + w=dsp.arm_scale_q15(w,the_max,0) + return(w) + + + def remove_noise(self,w): + w,status,the_max = self.rescale(w) + sig=self.window_and_pad(w) + + # Convert to complex. + signalR=np.zeros(len(sig) * 2,dtype=np.int16) + signalR[0::2]=sig + + + if dsp.has_neon(): + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp) + else: + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1) + + resultR = self.subnoise(resultR,status,the_max) + + if dsp.has_neon(): + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp) + else: + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1) + res = dsp.arm_shift_q15(res,self._fftShift) + + res=res[0::2] + res=self.remove_padding(res) + + if status == 0: + res=self.undo_scale(res,the_max) + return(res) + + def estimate_noise(self,w): + w,status,the_max = self.rescale(w) + self._noise_status = status + self._noise_max = the_max + + sig=self.window_and_pad(w) + + signalR=np.zeros(len(sig) * 2) + signalR[0::2]=sig + + if dsp.has_neon(): + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp) + else: + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1) + + resultRQ31 = dsp.arm_q15_to_q31(resultR) + + + self._noise = dsp.arm_cmplx_mag_squared_q31(resultRQ31) + + + resultR = np.zeros(len(resultR),dtype=np.int16) + + if dsp.has_neon(): + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp) + else: + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1) + res = dsp.arm_shift_q15(res,self._fftShift) + + res=res[0::2] + res=self.remove_padding(res) + + if status == 0: + res=self.undo_scale(res,the_max) + + return(res) + + def do_nothing(self,w): + w,status,the_max = self.rescale(w) + sig=self.window_and_pad(w) + + + # Convert to complex. 
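+        # CMSIS-DSP stores complex data as interleaved real/imaginary values,
+        # so the real samples go into the even indices and the imaginary parts stay zero.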
+ signalR=np.zeros(len(sig) * 2,dtype=np.int16) + signalR[0::2]=sig + + + if dsp.has_neon(): + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp) + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp) + else: + resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1) + res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1) + res = dsp.arm_shift_q15(res,self._fftShift) + + res=res[0::2] + + res=self.remove_padding(res) + + if status == 0: + res=self.undo_scale(res,the_max) + + return(res) + + + def nr(self,nonr=False): + if dsp.has_neon(): + tmp_nb = dsp.arm_cfft_tmp_buffer_size(dt.Q15,self._fftLen,1) + self._tmp = np.zeros(tmp_nb,dtype=np.int16) + for (w,v) in zip(self._slices,self._vad): + result=None + if nonr: + result = self.do_nothing(w) + else: + if v==1: + result=self.remove_noise(w) + else: + result=self.estimate_noise(w) + self._signal.append(result) + + def overlap_and_add(self): + nbSamples = len(self._signal)*winOverlap + offsets = range(0, nbSamples,winOverlap) + offsets=offsets[0:len(self._signal)] + res=np.zeros(nbSamples,dtype=np.int16) + i=0 + for n in offsets: + res[n:n+winLength] = dsp.arm_add_q15(res[n:n+winLength],self._signal[i]) + i=i+1 + return(res) +``` + +Verify that the Q15 algorithm is working: + +```python +n=NoiseSuppressionQ15(slices_q15) +n.nr() +cleaned_q15=n.overlap_and_add() +plt.plot(fix.Q15toF32(cleaned_q15)) +plt.show() +``` + +You can now listen to the result: + +```python +audioQ15=Audio(data=fix.Q15toF32(cleaned_q15),rate=samplerate,autoplay=False) +audioQ15 +``` + +## Differences with the float implementation + +There are many differences from the original float implementation, which are explained below. + +### constructor + +The constructor is similar and uses Q15 instead of float. The Hanning window is converted to Q15, and Q15 versions of the CFFT objects are created. + +### subnoise + +The noise reduction function is more complex for several reasons: + +- Q15 is not accurate enough for the energy computation. Q31 is used instead. For instance: +```python +vq31 = dsp.arm_q15_to_q31(v) +energy = dsp.arm_cmplx_mag_squared_q31(vq31) +``` + +- For maximum accuracy, the signal is rescaled before calling this function. Since energy is not a linear function, the scaling factor must be compensated when computing the Wiener gain. The argument `status` is zero when the scaling has been applied. A similar scaling factor is applied to the noise: +```python +if status==0: + the_max_q31=dsp.arm_q15_to_q31([the_max])[0] + energy=dsp.arm_scale_q31(energy,the_max_q31,0) + energy=dsp.arm_scale_q31(energy,the_max_q31,0) +``` + +- CMSIS-DSP fixed-point division represents 1 exactly. So in Q31, instead of using `0x7FFFFFFF`, `1` is represented as `0x40000000` with a shift of `1`. This behavior is handled in the algorithm when converting the scaling factor to an approximate Q31 value: +```python +status,quotient,shiftVal = dsp.arm_divide_q31(a,b) +if shiftVal > 0: + quotient=0x7FFFFFFF + shiftVal = 0 +``` + +- The final scaling is performed using a Q31 multiplication, and the result is converted back to Q15: +```python +res = dsp.arm_cmplx_mult_real_q31(vq31,scalingQ31) +resQ15 = dsp.arm_q31_to_q15(res) +``` + +### rescaling + +To achieve maximum accuracy in Q15, the signal (and noise) is rescaled before computing the energy. +This rescaling function did not exist in the float implementation. 
The signal is divided by its maximum value to bring it to full scale:
+
+```python
+def rescale(self,w):
+    the_max,index=dsp.arm_absmax_q15(w)
+
+    quotient=0x7FFF
+    the_shift=0
+    status = -1
+    if the_max != 0:
+        status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max)
+        if status == 0:
+            w=dsp.arm_scale_q15(w,quotient,the_shift)
+    return(w,status,the_max)
+```
+
+The scaling must be reversed after the IFFT to allow recombining the slices and reconstructing the signal:
+
+```python
+def undo_scale(self,w,the_max):
+    w=dsp.arm_scale_q15(w,the_max,0)
+    return(w)
+```
+
+### noise suppression
+
+The algorithm closely follows the float implementation.
+However, there is a small difference because CMSIS-DSP can be built for Cortex-A and Cortex-M. On Cortex-A, the FFT API differs slightly, as it uses a different implementation.
+
+If the Python package has been built with Neon acceleration, it will use the new API that requires an additional temporary buffer.
+
+If this temporary buffer is not provided, the Python package will allocate it automatically. While you can use the same API, this is less efficient.
+
+It is better to detect whether the package has been compiled with Neon acceleration, allocate a temporary buffer, and use it in the FFT calls. This approach is closer to how the C API must be used.
+
+```python
+if dsp.has_neon():
+    resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp)
+else:
+    resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1)
+```
+
+In the Neon version, the FFT's bit-reversal flag is no longer available; bit reversal cannot be disabled.
+
+A scaling factor must be applied to the IFFT output:
+
+```python
+res = dsp.arm_shift_q15(res,self._fftShift)
+```
+
+This scaling is unrelated to the signal and noise scaling used for improved accuracy.
+
+The output of the Q15 IFFT is not in Q15 format and must be converted. This is typical of fixed-point FFTs, and the same applies to Q31 FFTs.
+
+Finally, the accuracy-related scaling factor is removed at the end of the function:
+
+```python
+if status == 0:
+    res=self.undo_scale(res,the_max)
+```
+
+### noise estimation
+
+The noise estimation function performs both noise estimation and noise suppression.
+
+Noise energy is computed in Q31 for higher accuracy.
+The FFT functions detect whether the package was built with Neon support.
+
+### do_nothing
+
+`do_nothing` is a debug function. You can disable noise reduction and test only slicing, overlap-add, and the FFT/IFFT in between.
+
+This function applies scaling and performs the FFT/IFFT.
+
+It's a good way to check for saturation issues (which are common with fixed-point arithmetic) and to ensure proper scaling compensation.
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md
new file mode 100644
index 0000000000..94f3f21b47
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md
@@ -0,0 +1,118 @@
+---
+title: Convert the CMSIS-DSP Python to C
+weight: 8
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Convert the CMSIS-DSP Python to C
+
+Once the Python code is working, writing the C code should be straightforward, since the CMSIS-DSP Python wrapper’s API closely mirrors the C API.
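+
+The main difference to keep in mind is the calling convention: the Python wrapper returns its results, while the C functions take the number of samples and write their results through output pointers, as the examples below show. Here is a minimal sketch of that correspondence using `arm_mean_q15` (the sample values are arbitrary):
+
+```python
+import numpy as np
+import cmsisdsp as dsp
+
+window = np.array([100, 200, 300, 400], dtype=np.int16)  # Q15 samples
+mean = dsp.arm_mean_q15(window)  # Python: the result is returned
+# Equivalent C call (see the signal-energy example below):
+#   arm_mean_q15(window, nb, &mean);
+print(mean)
+```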
+
+### Rescaling
+
+For example, let's look at the rescaling function.
+
+#### Python version
+
+```python
+def rescale(self,w):
+    the_max,index=dsp.arm_absmax_q15(w)
+
+    quotient=0x7FFF
+    the_shift=0
+    status = -1
+    if the_max != 0:
+        status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max)
+        if status == 0:
+            w=dsp.arm_scale_q15(w,quotient,the_shift)
+    return(w,status,the_max)
+```
+
+#### C version
+
+```C
+#include "dsp/basic_math_functions.h"
+#include "dsp/statistics_functions.h"
+
+arm_status rescale(q15_t *w, uint32_t nb, q15_t *the_max)
+{
+    uint32_t index;
+    q15_t quotient = 0x7FFF;
+    /* Default status: a signal whose maximum is zero cannot be rescaled */
+    arm_status status = ARM_MATH_SINGULAR;
+    int16_t the_shift = 0;
+    *the_max = 0;
+
+    arm_absmax_q15(w, nb, the_max, &index);
+    if (*the_max != 0)
+    {
+        status = arm_divide_q15(0x7FFF, *the_max, &quotient, &the_shift);
+        if (status == ARM_MATH_SUCCESS)
+        {
+            arm_scale_q15(w, quotient, (int8_t)the_shift, w, nb);
+        }
+    }
+
+    return(status);
+}
+```
+
+### Signal energy
+
+#### Python version
+
+```python
+def signal_energy_q15(window):
+    mean=dsp.arm_mean_q15(window)
+    # Subtracting the mean won't cause saturation.
+    # So we use the CMSIS-DSP negate function on an array containing a single sample.
+    neg_mean=dsp.arm_negate_q15([mean])[0]
+    window=dsp.arm_offset_q15(window,neg_mean)
+    energy=dsp.arm_power_q15(window)
+    # Energy is not in Q15 format (refer to the CMSIS-DSP documentation).
+    energy=dsp.ssat(energy>>20,16)
+    dB=dsp.arm_vlog_q15([energy])
+    # The output of the `vlog` is not in Q15.
+    # The multiplication by `10` is missing compared to the NumPy
+    # reference implementation.
+    # The result of this function is not equivalent to the float implementation due to the different
+    # formats used in the intermediate computations.
+    # As a consequence, a different threshold will have to be used
+    # to compensate for these differences.
+    return(dB[0])
+```
+
+#### C version
+
+```C
+#include "dsp/basic_math_functions.h"
+#include "dsp/fast_math_functions.h"
+#include "dsp/statistics_functions.h"
+
+int16_t signal_energy_q15(q15_t *window, uint32_t nb)
+{
+    q15_t mean, neg_mean;
+    arm_mean_q15(window, nb, &mean);
+
+    arm_negate_q15(&mean, &neg_mean, 1);
+
+    arm_offset_q15(window, neg_mean, window, nb);
+
+    q63_t energy_q63;
+    q15_t energy;
+    arm_power_q15(window, nb, &energy_q63);
+
+    energy = (q15_t)__SSAT((q31_t)(energy_q63 >> 20), 16);
+
+    // The result is a 16-bit fixed-point value, but its exact format
+    // has not been identified, to keep this tutorial simple.
+    // We only know it is not Q15.
+    int16_t dB;
+
+    arm_vlog_q15(&energy, &dB, 1);
+
+    return(dB);
+}
+```
+
+A DSP function written in Python using CMSIS-DSP can be easily converted into a similar C function.
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md
new file mode 100644
index 0000000000..5fc02c677a
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md
@@ -0,0 +1,27 @@
+---
+title: Study more examples
+weight: 9
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Study more examples
+
+The [CMSIS-DSP Python example folder](https://github.com/ARM-software/CMSIS-DSP/tree/main/PythonWrapper/examples) contains many tests, examples, and some Jupyter notebooks.
+
+You can study these examples to gain a better understanding of how to use the Python package.
+ +The [CMSIS-DSP python package](https://pypi.org/project/cmsisdsp/) describes the differences between the Python API and the C API. + + +## Remaining issues + +The CMSIS-DSP Python package helps to design and translate a DSP function working on a block of samples from Python to C. +But in a real application, you don’t receive blocks of samples, but rather a continuous stream. + +The stream of samples must be split into blocks before the DSP function can be used. The processed blocks may need to be recombined to reconstruct a signal. + +Part of the difficulty in this learning path comes from splitting and recombining the signal. Translating this part of the Python code to C adds further complexity. + +[CMSIS-Stream](https://github.com/ARM-software/CMSIS-Stream) may help for this. It is a platform-independent technology designed to simplify the use of block-processing functions with sample streams. It is a low-overhead solution. diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png new file mode 100644 index 0000000000..83640fdbe5 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png new file mode 100644 index 0000000000..e9a8ca4da5 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png new file mode 100644 index 0000000000..7b2d0195b7 Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png differ diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/build_nn.md b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/build_nn.md index c5d8402043..72390b8268 100644 --- a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/build_nn.md +++ b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/build_nn.md @@ -91,7 +91,7 @@ plt.show() The expected output is shown below -![output1](Images/lab4_1.PNG) +![output1](images/lab4_1.PNG) Next, normalize all the training and testing data to have values between 0 and 1. This normalization facilitates machine learning. Each RGB value ranges from 0 to 255, so divide the training and testing data by 255. @@ -124,7 +124,7 @@ You are going to create a small convolutional neural network for image classific Here is an image illustrating the network architecture. Note that only convolution and dense layers are illustrated in this image. 
-![output2](Images/lab4_2.PNG) +![output2](images/lab4_2.PNG) Execute the code blocks below to create a sequential model and add the layers diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/deploy_nn.md b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/deploy_nn.md index dc4619ee98..d740e10fa1 100644 --- a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/deploy_nn.md +++ b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/deploy_nn.md @@ -52,15 +52,15 @@ In this section, you will deploy the model directly on the STM32 board. 5. Set `Toolchain/IDE` as `STM32CubeIDE` -![output3](Images/lab4_3.PNG) +![output3](images/lab4_3.PNG) 6. Go to `Pinout & Configuration` and clear pinouts from the `Pinout` menu. -![output4](Images/lab4_4.PNG) +![output4](images/lab4_4.PNG) 7. In `Software Packs` menu, click `Select Components`. Enable `X-CUBE-AI`. For device application, choose `Validation`. Click `OK` to save. -![output5](Images/lab4_5.PNG) +![output5](images/lab4_5.PNG) 8. Navigate to `X-CUBE-AI` configuration. @@ -70,7 +70,7 @@ In this section, you will deploy the model directly on the STM32 board. 11. Generate the validation code for the model by clicking `Generate Code`. -![output6](Images/lab4_6.PNG) +![output6](images/lab4_6.PNG) 12. Open STM32CubeIDE. @@ -80,10 +80,10 @@ In this section, you will deploy the model directly on the STM32 board. 15. Ensure that the board is connected to your computer. If it is correctly connected, build and flash the code by clicking `Run As`. -![output7](Images/lab4_7.PNG) +![output7](images/lab4_7.PNG) 16. If you get an ‘undefined reference’ error, go to `Core/Src/main.c`. Remove `static` from the declaration of the `MX_USART1_UART_Init()` function and also from its definition. Try `Run As` again. -![output8](Images/lab4_8.PNG) +![output8](images/lab4_8.PNG) With the model now deployed on the STM32 board, you are ready to test it. 
diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_1.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_1.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_1.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_1.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_10.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_10.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_10.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_10.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_11.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_11.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_11.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_11.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_12.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_12.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_12.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_12.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_2.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_2.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_2.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_2.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_3.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_3.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_3.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_3.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_4.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_4.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_4.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_4.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_5.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_5.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_5.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_5.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_6.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_6.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_6.PNG rename to 
content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_6.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_7.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_7.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_7.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_7.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_8.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_8.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_8.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_8.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_9.PNG b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_9.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Images/lab4_9.PNG rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/images/lab4_9.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Project_Files/img_class_stcube.zip b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/project_files/img_class_stcube.zip similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/Project_Files/img_class_stcube.zip rename to content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/project_files/img_class_stcube.zip diff --git a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/run_nn.md b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/run_nn.md index a367b8cefd..75d94ac8c8 100644 --- a/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/run_nn.md +++ b/content/learning-paths/embedded-and-microcontrollers/img_nn_stcube/run_nn.md @@ -37,20 +37,20 @@ If the board is not detected, click the black button on the board to reset, then Select the model network from the list of models deployed on the board. -![output9](Images/lab4_9.PNG) +![output9](images/lab4_9.PNG) Select the network and the label file (`Data/labels/cifar10_labels.txt`) -![output10](Images/lab4_10.PNG) +![output10](images/lab4_10.PNG) Open an image to test. The tool will automatically launch a new pane, and show the inference result. Observe that the model correctly predicted the label. In addition, note the time taken to finish the prediction. -![output11](Images/lab4_11.PNG) +![output11](images/lab4_11.PNG) You can also use your workstation camera to test image classification. Hold an appropriate picture up to your camera, then press `S`. The tool captures the image and sends it to the board. -![output12](Images/lab4_12.PNG) +![output12](images/lab4_12.PNG) You have now successfully ran the model on your STM32 board. 
diff --git a/content/learning-paths/embedded-and-microcontrollers/introduction-to-tinyml-on-arm/3-env-setup-FVP.md b/content/learning-paths/embedded-and-microcontrollers/introduction-to-tinyml-on-arm/3-env-setup-fvp.md similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/introduction-to-tinyml-on-arm/3-env-setup-FVP.md rename to content/learning-paths/embedded-and-microcontrollers/introduction-to-tinyml-on-arm/3-env-setup-fvp.md diff --git a/content/learning-paths/embedded-and-microcontrollers/nav-mlek/sw.md b/content/learning-paths/embedded-and-microcontrollers/nav-mlek/sw.md index a2011ab0cb..1f129e9d63 100644 --- a/content/learning-paths/embedded-and-microcontrollers/nav-mlek/sw.md +++ b/content/learning-paths/embedded-and-microcontrollers/nav-mlek/sw.md @@ -11,7 +11,7 @@ layout: "learningpathall" You should use an `x86_64` development machine running Windows or Linux for the best experience. -The [Arm ML Evaluation Kit (MLEK)](https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ml-embedded-evaluation-kit) is not fully supported on Windows. Some of the required tools work only on Linux. Linux is recommended if you plan to use MLEK extensively. +The [Arm ML Evaluation Kit (MLEK)](https://git.mlplatform.org/ml/ethos-u/ml-embedded-evaluation-kit.git/) is not fully supported on Windows. Some of the required tools work only on Linux. Linux is recommended if you plan to use MLEK extensively. There are some ML examples which can be developed using Windows tools. @@ -61,7 +61,7 @@ Use an editor or program such as `more` or `cat` to view the Dockerfile. ## Machine learning Frameworks -[TensorFlow Lite for Microcontrollers (TFLM)](https://www.tensorflow.org/lite/embedded-and-microcontrollers/) is on of the more common framework for microcontroller ML applications. +[LiteRT for Microcontrollers](https://ai.google.dev/edge/litert/microcontrollers/overview), formerly known as TensorFlow Lite for Microcontrollers, is one of the more common frameworks for microcontroller ML applications. TensorFlow uses [`xxd`](https://linux.die.net/man/1/xxd/) to convert TensorFlow Lite models into C data structures. diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/features.md b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/features.md index 24cca9f7ff..25023fc47d 100644 --- a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/features.md +++ b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/features.md @@ -54,7 +54,7 @@ for idx, file in enumerate(data_files): You can check the extracted features with this code block. These are the extracted features from one data sample.
Expected output shown below: -![output5](Images/output5.PNG) +![output5](images/output5.PNG) ## Feature based model diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output1.PNG b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output1.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output1.PNG rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output1.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output2.PNG b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output2.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output2.PNG rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output2.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output3.PNG b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output3.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output3.PNG rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output3.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output4.PNG b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output4.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output4.PNG rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output4.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output5.PNG b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output5.PNG similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Images/output5.PNG rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/images/output5.PNG diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Project_Files/tf_stm32.zip b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/project_files/tf_stm32.zip similarity index 100% rename from content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/Project_Files/tf_stm32.zip rename to content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/project_files/tf_stm32.zip diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/test.md b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/test.md index cf247622fc..dc7751d42b 100644 --- a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/test.md +++ b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/test.md @@ -77,7 +77,7 @@ plt.legend(loc='lower right') ``` In this example, see that the training and validation accuracy start to converge after around 150 epochs. This means that the 200 epochs are enough to train the model. If you train the model for too many epochs, then the validation accuracy may drop due to overfitting. If you experience this, re-run [training](#train) with an appropriate epoch value. 
-![output2](Images/output2.PNG) +![output2](images/output2.PNG) ## Investigate learning rate (optional) @@ -102,7 +102,7 @@ plt.legend(loc='lower right') ``` Expected output shown below: -![output3](Images/output3.PNG) +![output3](images/output3.PNG) Now try a lower learning rate, which is 0.0001. Execute the code block. The graph shows the training and validation loss values decrease much more slowly. So, it is important to use a proper learning rate in training. @@ -124,7 +124,7 @@ plt.legend(loc='lower right') Expected output shown below: -![output4](Images/output4.PNG) +![output4](images/output4.PNG) With the model trained, you are now ready to test it. diff --git a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/train_nn.md b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/train_nn.md index d69fd49196..17942e6cbc 100644 --- a/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/train_nn.md +++ b/content/learning-paths/embedded-and-microcontrollers/tflow_nn_stcube/train_nn.md @@ -162,4 +162,4 @@ plot_single_sample(data_sample=data[idx], label=labels[idx]) Example output is shown below: -![output1](Images/output1.PNG) +![output1](images/output1.PNG) diff --git a/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md b/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md index 6eb4025fcf..5c286ae95a 100644 --- a/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/tfm/_index.md @@ -1,27 +1,34 @@ --- -armips: -- Cortex-M -- Corstone -author: Pareena Verma -layout: learningpathall +title: Get started with Trusted Firmware-M + +minutes_to_complete: 15 + +who_is_this_for: This is an introductory topic for software developers new to Trusted + Firmware-M. + + learning_objectives: - Build and run the reference TF-M tests and example application. -learning_path_main_page: 'yes' -minutes_to_complete: 15 -operatingsystems: -- Baremetal + prerequisites: - Some familiarity with embedded C programming - A machine running Ubuntu Linux -skilllevels: Introductory -subjects: Security + +author: Pareena Verma + test_images: - armswdev/arm-tools:bare-metal-compilers test_link: null test_maintenance: true -test_status: -- passed -title: Get started with Trusted Firmware-M + +### Tags +skilllevels: Introductory +subjects: Security +armips: +- Cortex-M +- Corstone +operatingsystems: +- Baremetal tools_software_languages: - Arm Virtual Hardware - Fixed Virtual Platform @@ -48,7 +55,11 @@ further_reading: type: website -weight: 1 -who_is_this_for: This is an introductory topic for software developers new to Trusted - Firmware-M. +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
+layout: learningpathall + --- diff --git a/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md b/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md index 1ecd1211d0..1dc3bb5b73 100644 --- a/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/zephyr/_index.md @@ -1,26 +1,33 @@ --- -armips: -- Cortex-M -author: Pareena Verma -layout: learningpathall +title: Run the Zephyr RTOS on Arm Corstone-300 + +minutes_to_complete: 30 + +who_is_this_for: This is an introductory topic for software developers getting started + with the Zephyr RTOS. + + learning_objectives: - Build and run Zephyr applications on the Corstone-300 -learning_path_main_page: 'yes' -minutes_to_complete: 30 -operatingsystems: -- RTOS + prerequisites: - Some familiarity with embedded C programming - A Linux machine running Ubuntu, or an AWS account to use [Arm Virtual Hardware](https://www.arm.com/products/development-tools/simulation/virtual-hardware) -skilllevels: Introductory -subjects: RTOS Fundamentals + +author: Pareena Verma + test_images: - amd64/ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed -title: Run the Zephyr RTOS on Arm Corstone-300 + +### Tags +skilllevels: Introductory +subjects: RTOS Fundamentals +armips: +- Cortex-M +operatingsystems: +- RTOS tools_software_languages: - Zephyr - Arm Virtual Hardware @@ -42,7 +49,11 @@ further_reading: type: website -weight: 1 -who_is_this_for: This is an introductory topic for software developers getting started - with the Zephyr RTOS. ---- +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +layout: learningpathall + +--- \ No newline at end of file diff --git a/content/learning-paths/iot/_index.md b/content/learning-paths/iot/_index.md index 10086206f5..001ca38f09 100644 --- a/content/learning-paths/iot/_index.md +++ b/content/learning-paths/iot/_index.md @@ -24,9 +24,10 @@ tools_software_languages_filter: - Arm Compiler for Embedded: 1 - Arm Virtual Hardware: 6 - AWS IoT Greengrass: 1 +- Azure: 1 - Balena Cloud: 1 - Balena OS: 1 -- Coding: 4 +- Coding: 3 - Docker: 2 - Fixed Virtual Platform: 1 - GitHub: 3 diff --git a/content/learning-paths/iot/azure-iot/_index.md b/content/learning-paths/iot/azure-iot/_index.md index 443a3fc7cc..dc75e9e466 100644 --- a/content/learning-paths/iot/azure-iot/_index.md +++ b/content/learning-paths/iot/azure-iot/_index.md @@ -1,12 +1,12 @@ --- -title: Creating IoT Solutions in Azure for Arm64-Powered Devices +title: Create IoT Solutions in Azure for Arm Powered Devices + +minutes_to_complete: 180 draft: true cascade: draft: true -minutes_to_complete: 320 - who_is_this_for: This is an advanced topic for software developers interested in learning how to build a comprehensive IoT solution in Azure that streams, stores, monitors, aggregates, and visualizes data from Arm64-powered IoT devices. learning_objectives: @@ -21,8 +21,6 @@ learning_objectives: prerequisites: - A machine that can run Python3, and Visual Studio Code. - Azure Account and Subscription. - - Azure CLI (Command Line Interface). - - Azure IoT SDK for Python. 
author: Dawid Borycki @@ -37,13 +35,8 @@ operatingsystems: - Linux - macOS tools_software_languages: - - Coding + - Azure - VS Code -shared_path: true -shared_between: - - servers-and-cloud-computing - - laptops-and-desktops - - mobile-graphics-and-gaming further_reading: - resource: diff --git a/content/learning-paths/iot/azure-iot/aggregation.md b/content/learning-paths/iot/azure-iot/aggregation.md index e89871f593..4bd3ea3f8c 100644 --- a/content/learning-paths/iot/azure-iot/aggregation.md +++ b/content/learning-paths/iot/azure-iot/aggregation.md @@ -7,8 +7,7 @@ weight: 8 layout: "learningpathall" --- -## Objective -In the previous section, you configured Azure Stream Analytics to securely store incoming IoT telemetry data in Azure Cosmos DB, making sensor data readily available for further processing. In this section, you’ll enhance your IoT solution by implementing real-time data aggregation capabilities using Azure Functions. Azure Functions is a powerful, event-driven, serverless compute service provided by Azure that allows you to execute custom code in response to scheduled events without managing infrastructure. You’ll create an Azure Function that periodically queries sensor data from Cosmos DB and computes aggregated metrics, such as average, minimum, and maximum values, enabling you to derive actionable insights and monitor sensor performance more effectively. +In the previous section, you configured Azure Stream Analytics to securely store incoming IoT telemetry data in Azure Cosmos DB, making sensor data readily available for further processing. In this section, you will enhance your IoT solution by implementing real-time data aggregation capabilities using Azure Functions. Azure Functions is a powerful, event-driven, serverless compute service provided by Azure that allows you to execute custom code in response to scheduled events without managing infrastructure. You will create an Azure Function that periodically queries sensor data from Cosmos DB and computes aggregated metrics, such as average, minimum, and maximum values, enabling you to derive actionable insights and monitor sensor performance more effectively. ## Data Aggregation As your IoT solution matures, the volume of sensor data continuously captured and securely stored in Azure Cosmos DB grows rapidly. However, raw telemetry data alone may not effectively communicate actionable insights, especially when quick decision-making and proactive management are required. Transforming this raw sensor data into meaningful, summarized information becomes essential for efficient monitoring, accurate analysis, and rapid response. @@ -18,9 +17,9 @@ Aggregating sensor readings into various metrics such as average, minimum, and m In this section, you will leverage Azure Functions to implement a data aggregation. This Azure Function will respond to the HTTP trigger, and return aggregated sensor data. ### Azure Function with HTTP Trigger -Building upon the sensor data aggregation strategy, this section demonstrates how to implement a serverless Azure Function using an HTTP trigger to calculate real-time insights from sensor data stored in Azure Cosmos DB. Specifically, you’ll create an HTTP-triggered function that queries temperature readings from the past minute, computes the average temperature, and returns this aggregated value as a JSON response. This HTTP-triggered approach provides an on-demand method to access up-to-date metrics. 
+Building upon the sensor data aggregation strategy, this section demonstrates how to implement a serverless Azure Function using an HTTP trigger to calculate real-time insights from sensor data stored in Azure Cosmos DB. Specifically, you will create an HTTP-triggered function that queries temperature readings from the past minute, computes the average temperature, and returns this aggregated value as a JSON response. This HTTP-triggered approach provides an on-demand method to access up-to-date metrics. -To implement this functionality open the function_app.py and modify it as follows: +To implement this functionality open the `function_app.py` and modify it as follows: 1. Add the following import statements: ```python from azure.cosmos import CosmosClient @@ -128,7 +127,7 @@ def get_average_temperature(req: func.HttpRequest) -> func.HttpResponse: ) ``` -The GetAverageTemperature function is triggered by an HTTP GET request sent to the route /averagetemperature. Upon invocation, it first logs that a request has been received for calculating the average temperature based on data from the last minute. +The `GetAverageTemperature` function is triggered by an HTTP GET request sent to the route /averagetemperature. Upon invocation, it first logs that a request has been received for calculating the average temperature based on data from the last minute. The function then retrieves the Cosmos DB connection string from an environment variable. If the connection string is not available, the function logs an error and returns a 500 Internal Server Error response, indicating that essential configuration details are missing. @@ -142,7 +141,7 @@ When data points are available, the function computes the average temperature fr Finally, if the average calculation succeeds, the function constructs a JSON response containing the calculated average temperature (rounded to two decimal places) along with a success message. It then sends this response back to the caller with a status code of 200 OK and the configured CORS header {"Access-Control-Allow-Origin": "*"}, which is required to ensure that the portal can successfully retrieve and display the data from the function. -Before running the function, dependencies need to be added and installed. Open the requirements.txt file and include the following lines: +Before running the function, dependencies need to be added and installed. Open the `requirements.txt` file and include the following lines: ```json azure-cosmos @@ -162,26 +161,26 @@ func start ``` Once running, observe the HTTP trigger endpoint, which should appear similar to the following: -![img36 alt-text#center](Figures/36.png) +![img36 alt-text#center](figures/36.png) Next, start the simulator to stream sensor data and open the HTTP trigger endpoint URL in your web browser. You will see the calculated average temperature displayed: -![img37 alt-text#center](Figures/37.png) +![img37 alt-text#center](figures/37.png) ## Deploy to Azure Function App Now that your Azure Function is fully tested and ready, it's time to deploy it to Azure, making it accessible online and available for integration with other services and applications. Visual Studio Code provides an easy and efficient way to deploy Azure Functions directly from your local development environment. Follow these steps to deploy your function 1. 
In Visual Studio Code, open the Command Palette (Ctrl+Shift+P on Windows/Linux, or Cmd+Shift+P on macOS) and search for "Azure Functions: Deploy to Function App": -![img38 alt-text#center](Figures/38.png) +![img38 alt-text#center](figures/38.png) 2. The deployment wizard will guide you through the following selections: * Subscription: choose the Azure subscription you wish to use, * Select a function app: Select the Function App that you previously created in Azure (in this example, "IoTTemperatureAlertFunc") * Confirm your deployment: -![img39 alt-text#center](Figures/39.png) +![img39 alt-text#center](figures/39.png) 3. Wait for the deployment to complete. This process typically takes a few moments. Once deployed, your Azure Function is hosted in Azure and ready for use. 4. Open the Azure Portal, and go to your function app (in this example, "IoTTemperatureAlertFunc"). You will see the deployed functions: -![img40 alt-text#center](Figures/40.png) +![img40 alt-text#center](figures/40.png) ## Configure Function App Settings -We have just deployed the functions to Azure. Previously, when testing the functions locally, we used the local.settings.json file to store the Cosmos DB connection string. However, this local configuration file is not deployed to Azure. Therefore, we need to update the corresponding settings directly within the Azure portal. +You have just deployed the functions to Azure. Previously, when testing the functions locally, you used the `local.settings.json` file to store the Cosmos DB connection string. However, this local configuration file is not deployed to Azure. Therefore, you need to update the corresponding settings directly within the Azure portal. Azure Function App settings, which are also known as application settings or environment variables, are designed to securely store sensitive configuration information, such as database connection strings, API keys, and other confidential details. Storing the Cosmos DB connection string as an app setting in Azure ensures secure management of your database credentials, allowing your function to safely access Cosmos DB without exposing sensitive information within your source code. @@ -191,7 +190,7 @@ Follow these steps to configure the Cosmos DB connection string 3. Click the + Add button 4. Enter the name you used in your code (e.g., armiotcosmosdb_DOCUMENTDB). 5. Paste the Cosmos DB connection string into the Value field: -![img41 alt-text#center](Figures/41.png) +![img41 alt-text#center](figures/41.png) 6. Click Apply to add the setting. 7. Press Apply at the bottom to apply changes. Then, confirm to save changes @@ -200,15 +199,15 @@ Once you've configured the connection string, test your deployed Azure Function 1. Return to the Overview page of your Azure Function App. 2. Click on your HTTP-triggered function (GetAverageTemperature). 3. Click Get function URL and copy the displayed URL (under default): -![img42 alt-text#center](Figures/42.png) +![img42 alt-text#center](figures/42.png) 4. Open this URL in your web browser. 5. Start your IoT simulator to begin streaming telemetry data to Cosmos DB. 6. 
Refresh or access the function URL again, and you should see the calculated average temperature displayed: -![img43 alt-text#center](Figures/43.png) +![img43 alt-text#center](figures/43.png) This confirms your Azure Function successfully connects to Cosmos DB, retrieves real-time data, and calculates the average temperature as intended -## Summary and next steps -In this section, you created an HTTP-triggered Azure Function that retrieves and aggregates records from Cosmos DB. You then deployed your Azure Function to Azure, configured secure application settings to safely store the Cosmos DB connection string, and verified the functionality. You also learned that the local configuration file (local.settings.json) is not automatically deployed to Azure, making it necessary to manually set up these sensitive settings within the Azure portal. Securely managing these application settings in Azure ensures that your functions can reliably connect to Cosmos DB, facilitating the accurate retrieval and processing of IoT telemetry data. +## Next Steps +In this section, you created an HTTP-triggered Azure Function that retrieves and aggregates records from Cosmos DB. You then deployed your Azure Function to Azure, configured secure application settings to safely store the Cosmos DB connection string, and verified the functionality. -In the next step, you’ll create a static website that leverages this HTTP-triggered function to display the average temperature in a web-based portal, thus completing your IoT solution. \ No newline at end of file +In the next step, you will create a static website that leverages this HTTP-triggered function to display the average temperature in a web-based portal, thus completing your IoT solution. diff --git a/content/learning-paths/iot/azure-iot/device_registration.md b/content/learning-paths/iot/azure-iot/device_registration.md index 603a60e3aa..540606761b 100644 --- a/content/learning-paths/iot/azure-iot/device_registration.md +++ b/content/learning-paths/iot/azure-iot/device_registration.md @@ -7,19 +7,19 @@ weight: 4 layout: "learningpathall" --- -## Intro -In this section, you’ll learn how to build a comprehensive IoT simulation using Azure IoT Hub and Python. You’ll create a reusable SensorReading class designed to simulate realistic sensor readings, including temperature, pressure, humidity, and timestamps. Additionally, you’ll implement a telemetry simulator that periodically sends these sensor readings to Azure IoT Hub, enabling you to observe real-time data streaming and cloud integration. +## Overview +In this section, you will learn how to build a comprehensive IoT simulation using Azure IoT Hub and Python. You will create a reusable `SensorReading` class designed to simulate realistic sensor readings, including temperature, pressure, humidity, and timestamps. You will also implement a telemetry simulator that periodically sends these sensor readings to Azure IoT Hub, enabling you to observe real-time data streaming and cloud integration. -Finally, you’ll configure your Python application to connect securely to Azure IoT Hub, allowing you to monitor and validate the continuous data flow. By the end of this section, you’ll have hands-on experience simulating IoT telemetry, providing a solid foundation for developing more advanced data analytics and visualization solutions in Azure. +Finally, you will configure your Python application to connect securely to Azure IoT Hub, allowing you to monitor and validate the continuous data flow. 
By the end of this section, you will have hands-on experience simulating IoT telemetry, providing a solid foundation for developing more advanced data analytics and visualization solutions in Azure. ## Azure IoT device SDK Begin by installing the Azure IoT Device SDK for Python, which provides essential tools and libraries needed to develop IoT applications that communicate seamlessly with Azure IoT Hub. This SDK enables secure device connectivity, message transmission, and management functionalities directly from Python code. -You can install the SDK easily using Python’s package manager pip. Open a terminal or command prompt and run the following command: +You can install the SDK easily using Python’s package manager `pip`. Open a terminal or command prompt and run the following command: ``` pip install azure-iot-device ``` -The output should look similar to the following one: +The output should look like: ```output Collecting azure-iot-device Downloading azure_iot_device-2.14.0-py3-none-any.whl.metadata (15 kB) @@ -56,11 +56,11 @@ Successfully installed PySocks-1.7.1 azure-iot-device-2.14.0 deprecation-2.1.0 j ``` ## Creating a Python IoT Simulator Application -In this section, you’ll create a Python application that simulates realistic sensor data generated by an Arm64-powered IoT device and streams this data securely to Azure IoT Hub. You’ll define a reusable and structured SensorReading class, capable of generating randomized yet realistic sensor measurements, including temperature, pressure, humidity, and timestamps. +In this section, you will create a Python application that simulates realistic sensor data generated by an Arm64-powered IoT device and streams this data securely to Azure IoT Hub. You will define a reusable and structured `SensorReading` class, capable of generating randomized yet realistic sensor measurements, including temperature, pressure, humidity, and timestamps. -Following this, you’ll implement an asynchronous telemetry simulator method, which continuously generates sensor readings at predefined intervals and transmits them to Azure IoT Hub. +After this, you will implement an asynchronous telemetry simulator method, which continuously generates sensor readings at predefined intervals and transmits them to Azure IoT Hub. -To achieve the above, create the iot_simulator.py file and modify it as follows: +To achieve the above, create a file named `iot_simulator.py` and add the following content to it: ```python import asyncio @@ -146,44 +146,44 @@ if __name__ == "__main__": main() ``` -The SensorReading class is designed to encapsulate and manage data from an IoT sensor. It models a single sensor reading by holding key attributes such as the device’s unique identifier, the measured temperature, pressure, and humidity, along with a timestamp that records exactly when the reading was taken. This structured representation allows you to easily handle and process sensor data within your application. +The `SensorReading` class is designed to encapsulate and manage data from an IoT sensor. It models a single sensor reading by holding key attributes such as the unique device identifier, measured temperature, pressure, and humidity, along with a timestamp that records exactly when the reading was taken. This structured representation allows you to easily handle and process sensor data within your application. -In addition to storing these values, the class provides a to_json method, which converts the sensor reading into a JSON-formatted string.
This makes it particularly useful for transmitting data over the network, as JSON is a widely accepted format in IoT communications and web services. +In addition to storing these values, the class provides a `to_json` method, which converts the sensor reading into a `JSON`-formatted string. This makes it particularly useful for transmitting data over the network, as `JSON` is a widely accepted format in IoT communications and web services. -Furthermore, the class includes a static method called generate_random. This method is a utility that simulates sensor data by generating random, yet realistic, values for temperature, pressure, and humidity. It also automatically sets the current UTC time (with proper timezone awareness) as the timestamp for the reading. This feature is especially useful when you need to simulate sensor output for testing or demonstration purposes, allowing you to mimic the behavior of a real IoT device without requiring actual sensor hardware. +Furthermore, the class includes a static method called `generate_random`. This method is a utility that simulates sensor data by generating random, yet realistic, values for temperature, pressure, and humidity. It also automatically sets the current `UTC` time (with proper timezone awareness) as the timestamp for the reading. This feature is especially useful when you need to simulate sensor output for testing or demonstration purposes, allowing you to mimic the behavior of a real IoT device without requiring actual sensor hardware. -Then we have the send_telemetry method. It is an asynchronous function designed to connect an IoT device to Azure IoT Hub and continuously transmit telemetry data at specified intervals. When invoked, it begins by creating a device client instance using the provided connection string, which contains the necessary credentials to authenticate with the IoT Hub. Once the connection is established, the send_telemetry method logs a confirmation message indicating a successful connection. +Next, you have the `send_telemetry` method. It is an asynchronous function designed to connect an IoT device to Azure IoT Hub and continuously transmit telemetry data at specified intervals. When invoked, it begins by creating a device client instance using the provided connection string, which contains the necessary credentials to authenticate with the IoT Hub. Once the connection is established, the `send_telemetry` method logs a confirmation message indicating a successful connection. -Inside an infinite loop, the function repeatedly generates a new sensor reading by calling the generate_random method of the SensorReading class, which simulates realistic sensor data for temperature, pressure, and humidity along with a current timestamp. This sensor data is then converted to a JSON string using the to_json method, making it suitable for transmission. The JSON message is sent to the Azure IoT Hub using the device client, and a log entry records each transmission for monitoring purposes. +Inside an infinite loop, the function repeatedly generates a new sensor reading by calling the `generate_random` method of the `SensorReading` class, which simulates realistic sensor data for temperature, pressure, and humidity along with a current timestamp. This sensor data is then converted to a `JSON` string using the `to_json` method, making it suitable for transmission. The `JSON` message is sent to the Azure IoT Hub using the device client, and a log entry records each transmission for monitoring purposes.
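Because the full `iot_simulator.py` listing is collapsed in the hunk above, here is only a minimal sketch of the connect, generate, and send loop that these paragraphs describe. It assumes the async `IoTHubDeviceClient` API from the `azure-iot-device` package installed earlier; the function signature, payload field names, and value ranges are illustrative and are not the exact contents of the Learning Path's file.

```python
import asyncio
import json
import logging
import random
from datetime import datetime, timezone

from azure.iot.device.aio import IoTHubDeviceClient


async def send_telemetry(connection_string: str, device_id: str, interval_seconds: int) -> None:
    # Create the client from the device connection string and open the connection.
    client = IoTHubDeviceClient.create_from_connection_string(connection_string)
    await client.connect()
    logging.info("Connected to Azure IoT Hub")
    try:
        while True:
            # Build one simulated reading; the Learning Path wraps this in a SensorReading class.
            reading = {
                "deviceId": device_id,
                "temperature": round(random.uniform(18.0, 30.0), 2),
                "pressure": round(random.uniform(990.0, 1030.0), 2),
                "humidity": round(random.uniform(30.0, 70.0), 2),
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }
            # Serialize to JSON, send the message to IoT Hub, then wait for the next interval.
            await client.send_message(json.dumps(reading))
            logging.info("Sent: %s", reading)
            await asyncio.sleep(interval_seconds)
    finally:
        # Always disconnect cleanly, whether the loop ends normally or is interrupted.
        await client.disconnect()
```

A sketch like this would typically be started with `asyncio.run(send_telemetry(CONNECTION_STRING, DEVICE_ID, INTERVAL_SECONDS))`, wrapped in a `try/except KeyboardInterrupt` so that stopping the simulator is logged, which mirrors the behavior described in the surrounding text.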
The function includes error handling to gracefully manage interruptions. For instance, if the user stops the process (via a keyboard interrupt), it logs that the telemetry sending has been halted. Additionally, any unexpected errors are caught and logged. Finally, regardless of how the loop is exited, the function ensures that the device client is properly disconnected from the IoT Hub, logging this disconnection to maintain clear operational records. -The main function serves as the entry point of the application, where essential configuration values for connecting to Azure IoT Hub are defined. Here, it sets the CONNECTION_STRING—which you need to replace with your actual device connection string from Azure IoT Hub—along with a unique DEVICE_ID and an INTERVAL_SECONDS value that determines how frequently the telemetry data is sent (in this case, every second). +The main function serves as the entry point of the application, where essential configuration values for connecting to Azure IoT Hub are defined. Here, it sets the `CONNECTION_STRING`, which you need to replace with your actual device connection string from Azure IoT Hub, along with a unique `DEVICE_ID` and an `INTERVAL_SECONDS` value that determines how frequently the telemetry data is sent (in this case, every second). -Within the main function, the asynchronous send_telemetry function is executed using asyncio.run(), which manages the event loop and ensures that the asynchronous operations run correctly. This function call initiates the process of connecting to the IoT Hub, generating sensor readings, and transmitting telemetry data at regular intervals. The entire operation is wrapped in a try-except block to catch and log any errors that might occur during execution. +Within the main function, the asynchronous `send_telemetry` function is executed using `asyncio.run()`, which manages the event loop and ensures that the asynchronous operations run correctly. This function call initiates the process of connecting to the IoT Hub, generating sensor readings, and transmitting telemetry data at regular intervals. The entire operation is wrapped in a try-except block to catch and log any errors that might occur during execution. -Finally, the conditional check if __name__ == "__main__": ensures that the main function is called only when the script is executed directly, rather than when it is imported as a module in another script. This structure provides a clear and organized starting point for the application, making it easier to understand and maintain. +Finally, the conditional check `if __name__ == "__main__":` ensures that the main function is called only when the script is executed directly, rather than when it is imported as a module in another script. This structure provides a clear and organized starting point for the application, making it easier to understand and maintain. ## Connecting IoT Device to Azure IoT Hub To connect the Python application you developed earlier to Azure IoT Hub, follow these detailed steps. 1. Register a Device on Azure IoT Hub: * Open the Azure Portal, go to your IoT Hub, and click Devices under Device management: -![img6 alt-text#center](Figures/06.png) +![img6 alt-text#center](figures/06.png) * Click “Add Device”, enter a device ID (e.g., arm64Device01), and leave the authentication type as “Symmetric key.” -![img7 alt-text#center](Figures/07.png) +![img7 alt-text#center](figures/07.png) * Click “Save”. 2. 
Next, you’ll need to retrieve the connection string to integrate your Python application with Azure IoT Hub: * From the device list, select your newly created device (arm64Device01) * Copy the Primary Connection String from the device details page. You’ll need this connection string to authenticate your Python application when connecting and streaming telemetry data -![img8 alt-text#center](Figures/08.png) +![img8 alt-text#center](figures/08.png) Ensure this connection string is stored securely, as it provides authentication credentials for your device. In the next step, you’ll integrate this connection string into your Python simulator app, enabling secure communication and real-time data streaming to Azure IoT Hub ## Streaming Telemetry Data to Azure IoT Hub Now you’re ready to stream telemetry data from your Python application directly to Azure IoT Hub. Follow these steps to configure and run the application: -1. Open your Python IoT simulator script (iot_simulator.py). +1. Open your Python IoT simulator script `iot_simulator.py`. 2. Update the connection string. Replace the placeholder with your actual Azure IoT device primary connection string, which you retrieved from the Azure portal: ```python CONNECTION_STRING = "HostName=iot-hub-arm64.azure-devices.net" @@ -195,7 +195,7 @@ Ensure this connection string exactly matches the string copied from Azure IoT H python3 iot_simulator.py ``` -Upon successful execution, you should see output similar to the following, confirming a stable connection and periodic telemetry data transmission to Azure IoT Hub: +Upon successful execution, you should see output similar to what is shown below, confirming a stable connection and periodic telemetry data transmission to Azure IoT Hub: ```output 2025-03-16 19:39:12,944 - INFO - Creating client for connecting using MQTT over TCP @@ -240,11 +240,8 @@ Upon successful execution, you should see output similar to the following, confi Each telemetry message contains randomized sensor data (temperature, pressure, humidity), device ID, and a timestamp, providing realistic simulated data for IoT applications. -To stop streaming telemetry data, press Ctrl+C in the terminal. The application will gracefully disconnect from Azure IoT Hub. +To stop streaming telemetry data, press `Ctrl+C` in the terminal. The application will gracefully disconnect from Azure IoT Hub. This step completes the telemetry-streaming component of your Azure IoT application, laying the groundwork for subsequent steps like data processing, monitoring, and visualization -## Summary -In this part, you’ve successfully created and configured a Python IoT simulator application designed specifically for streaming realistic sensor data from Arm64-powered IoT devices. You’ve implemented a robust and reusable SensorReading class, generating randomized values for key sensor metrics—temperature, pressure, humidity—and timestamping these readings accurately. Additionally, you connected the Python application securely to your newly created Azure IoT Hub using the Azure IoT device SDK, establishing real-time telemetry data streaming capabilities. - -With this setup in place, your simulated IoT device continuously transmits data to Azure IoT Hub, providing a solid foundation to explore more advanced IoT scenarios, such as real-time data analytics, cloud storage, monitoring, alerts, and data visualization within the Azure ecosystem. 
\ No newline at end of file +With this setup in place, your simulated IoT device continuously transmits data to Azure IoT Hub, providing a solid foundation to explore more advanced IoT scenarios, such as real-time data analytics, cloud storage, monitoring, alerts, and data visualization within the Azure ecosystem. diff --git a/content/learning-paths/iot/azure-iot/Figures/01.png b/content/learning-paths/iot/azure-iot/figures/01.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/01.png rename to content/learning-paths/iot/azure-iot/figures/01.png diff --git a/content/learning-paths/iot/azure-iot/Figures/02.png b/content/learning-paths/iot/azure-iot/figures/02.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/02.png rename to content/learning-paths/iot/azure-iot/figures/02.png diff --git a/content/learning-paths/iot/azure-iot/Figures/03.png b/content/learning-paths/iot/azure-iot/figures/03.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/03.png rename to content/learning-paths/iot/azure-iot/figures/03.png diff --git a/content/learning-paths/iot/azure-iot/Figures/04.png b/content/learning-paths/iot/azure-iot/figures/04.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/04.png rename to content/learning-paths/iot/azure-iot/figures/04.png diff --git a/content/learning-paths/iot/azure-iot/Figures/05.png b/content/learning-paths/iot/azure-iot/figures/05.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/05.png rename to content/learning-paths/iot/azure-iot/figures/05.png diff --git a/content/learning-paths/iot/azure-iot/Figures/06.png b/content/learning-paths/iot/azure-iot/figures/06.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/06.png rename to content/learning-paths/iot/azure-iot/figures/06.png diff --git a/content/learning-paths/iot/azure-iot/Figures/07.png b/content/learning-paths/iot/azure-iot/figures/07.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/07.png rename to content/learning-paths/iot/azure-iot/figures/07.png diff --git a/content/learning-paths/iot/azure-iot/Figures/08.png b/content/learning-paths/iot/azure-iot/figures/08.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/08.png rename to content/learning-paths/iot/azure-iot/figures/08.png diff --git a/content/learning-paths/iot/azure-iot/Figures/09.png b/content/learning-paths/iot/azure-iot/figures/09.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/09.png rename to content/learning-paths/iot/azure-iot/figures/09.png diff --git a/content/learning-paths/iot/azure-iot/Figures/10.png b/content/learning-paths/iot/azure-iot/figures/10.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/10.png rename to content/learning-paths/iot/azure-iot/figures/10.png diff --git a/content/learning-paths/iot/azure-iot/Figures/11.png b/content/learning-paths/iot/azure-iot/figures/11.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/11.png rename to content/learning-paths/iot/azure-iot/figures/11.png diff --git a/content/learning-paths/iot/azure-iot/Figures/12.png b/content/learning-paths/iot/azure-iot/figures/12.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/12.png rename to content/learning-paths/iot/azure-iot/figures/12.png diff --git 
a/content/learning-paths/iot/azure-iot/Figures/13.png b/content/learning-paths/iot/azure-iot/figures/13.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/13.png rename to content/learning-paths/iot/azure-iot/figures/13.png diff --git a/content/learning-paths/iot/azure-iot/Figures/14.png b/content/learning-paths/iot/azure-iot/figures/14.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/14.png rename to content/learning-paths/iot/azure-iot/figures/14.png diff --git a/content/learning-paths/iot/azure-iot/Figures/15.png b/content/learning-paths/iot/azure-iot/figures/15.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/15.png rename to content/learning-paths/iot/azure-iot/figures/15.png diff --git a/content/learning-paths/iot/azure-iot/Figures/16.png b/content/learning-paths/iot/azure-iot/figures/16.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/16.png rename to content/learning-paths/iot/azure-iot/figures/16.png diff --git a/content/learning-paths/iot/azure-iot/Figures/17.png b/content/learning-paths/iot/azure-iot/figures/17.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/17.png rename to content/learning-paths/iot/azure-iot/figures/17.png diff --git a/content/learning-paths/iot/azure-iot/Figures/18.png b/content/learning-paths/iot/azure-iot/figures/18.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/18.png rename to content/learning-paths/iot/azure-iot/figures/18.png diff --git a/content/learning-paths/iot/azure-iot/Figures/19.png b/content/learning-paths/iot/azure-iot/figures/19.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/19.png rename to content/learning-paths/iot/azure-iot/figures/19.png diff --git a/content/learning-paths/iot/azure-iot/Figures/20.png b/content/learning-paths/iot/azure-iot/figures/20.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/20.png rename to content/learning-paths/iot/azure-iot/figures/20.png diff --git a/content/learning-paths/iot/azure-iot/Figures/21.png b/content/learning-paths/iot/azure-iot/figures/21.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/21.png rename to content/learning-paths/iot/azure-iot/figures/21.png diff --git a/content/learning-paths/iot/azure-iot/Figures/22.png b/content/learning-paths/iot/azure-iot/figures/22.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/22.png rename to content/learning-paths/iot/azure-iot/figures/22.png diff --git a/content/learning-paths/iot/azure-iot/Figures/23.png b/content/learning-paths/iot/azure-iot/figures/23.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/23.png rename to content/learning-paths/iot/azure-iot/figures/23.png diff --git a/content/learning-paths/iot/azure-iot/Figures/24.png b/content/learning-paths/iot/azure-iot/figures/24.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/24.png rename to content/learning-paths/iot/azure-iot/figures/24.png diff --git a/content/learning-paths/iot/azure-iot/Figures/25.png b/content/learning-paths/iot/azure-iot/figures/25.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/25.png rename to content/learning-paths/iot/azure-iot/figures/25.png diff --git a/content/learning-paths/iot/azure-iot/Figures/26.png 
b/content/learning-paths/iot/azure-iot/figures/26.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/26.png rename to content/learning-paths/iot/azure-iot/figures/26.png diff --git a/content/learning-paths/iot/azure-iot/Figures/27.png b/content/learning-paths/iot/azure-iot/figures/27.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/27.png rename to content/learning-paths/iot/azure-iot/figures/27.png diff --git a/content/learning-paths/iot/azure-iot/Figures/28.png b/content/learning-paths/iot/azure-iot/figures/28.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/28.png rename to content/learning-paths/iot/azure-iot/figures/28.png diff --git a/content/learning-paths/iot/azure-iot/Figures/29.png b/content/learning-paths/iot/azure-iot/figures/29.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/29.png rename to content/learning-paths/iot/azure-iot/figures/29.png diff --git a/content/learning-paths/iot/azure-iot/Figures/30.png b/content/learning-paths/iot/azure-iot/figures/30.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/30.png rename to content/learning-paths/iot/azure-iot/figures/30.png diff --git a/content/learning-paths/iot/azure-iot/Figures/31.png b/content/learning-paths/iot/azure-iot/figures/31.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/31.png rename to content/learning-paths/iot/azure-iot/figures/31.png diff --git a/content/learning-paths/iot/azure-iot/Figures/32.png b/content/learning-paths/iot/azure-iot/figures/32.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/32.png rename to content/learning-paths/iot/azure-iot/figures/32.png diff --git a/content/learning-paths/iot/azure-iot/Figures/33.png b/content/learning-paths/iot/azure-iot/figures/33.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/33.png rename to content/learning-paths/iot/azure-iot/figures/33.png diff --git a/content/learning-paths/iot/azure-iot/Figures/34.png b/content/learning-paths/iot/azure-iot/figures/34.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/34.png rename to content/learning-paths/iot/azure-iot/figures/34.png diff --git a/content/learning-paths/iot/azure-iot/Figures/35.png b/content/learning-paths/iot/azure-iot/figures/35.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/35.png rename to content/learning-paths/iot/azure-iot/figures/35.png diff --git a/content/learning-paths/iot/azure-iot/Figures/36.png b/content/learning-paths/iot/azure-iot/figures/36.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/36.png rename to content/learning-paths/iot/azure-iot/figures/36.png diff --git a/content/learning-paths/iot/azure-iot/Figures/37.png b/content/learning-paths/iot/azure-iot/figures/37.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/37.png rename to content/learning-paths/iot/azure-iot/figures/37.png diff --git a/content/learning-paths/iot/azure-iot/Figures/38.png b/content/learning-paths/iot/azure-iot/figures/38.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/38.png rename to content/learning-paths/iot/azure-iot/figures/38.png diff --git a/content/learning-paths/iot/azure-iot/Figures/39.png b/content/learning-paths/iot/azure-iot/figures/39.png similarity index 100% rename 
from content/learning-paths/iot/azure-iot/Figures/39.png rename to content/learning-paths/iot/azure-iot/figures/39.png diff --git a/content/learning-paths/iot/azure-iot/Figures/40.png b/content/learning-paths/iot/azure-iot/figures/40.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/40.png rename to content/learning-paths/iot/azure-iot/figures/40.png diff --git a/content/learning-paths/iot/azure-iot/Figures/41.png b/content/learning-paths/iot/azure-iot/figures/41.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/41.png rename to content/learning-paths/iot/azure-iot/figures/41.png diff --git a/content/learning-paths/iot/azure-iot/Figures/42.png b/content/learning-paths/iot/azure-iot/figures/42.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/42.png rename to content/learning-paths/iot/azure-iot/figures/42.png diff --git a/content/learning-paths/iot/azure-iot/Figures/43.png b/content/learning-paths/iot/azure-iot/figures/43.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/43.png rename to content/learning-paths/iot/azure-iot/figures/43.png diff --git a/content/learning-paths/iot/azure-iot/Figures/44.png b/content/learning-paths/iot/azure-iot/figures/44.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/44.png rename to content/learning-paths/iot/azure-iot/figures/44.png diff --git a/content/learning-paths/iot/azure-iot/Figures/45.png b/content/learning-paths/iot/azure-iot/figures/45.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/45.png rename to content/learning-paths/iot/azure-iot/figures/45.png diff --git a/content/learning-paths/iot/azure-iot/Figures/46.png b/content/learning-paths/iot/azure-iot/figures/46.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/46.png rename to content/learning-paths/iot/azure-iot/figures/46.png diff --git a/content/learning-paths/iot/azure-iot/Figures/47.png b/content/learning-paths/iot/azure-iot/figures/47.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/47.png rename to content/learning-paths/iot/azure-iot/figures/47.png diff --git a/content/learning-paths/iot/azure-iot/Figures/48.png b/content/learning-paths/iot/azure-iot/figures/48.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/48.png rename to content/learning-paths/iot/azure-iot/figures/48.png diff --git a/content/learning-paths/iot/azure-iot/Figures/49.png b/content/learning-paths/iot/azure-iot/figures/49.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/49.png rename to content/learning-paths/iot/azure-iot/figures/49.png diff --git a/content/learning-paths/iot/azure-iot/Figures/50.png b/content/learning-paths/iot/azure-iot/figures/50.png similarity index 100% rename from content/learning-paths/iot/azure-iot/Figures/50.png rename to content/learning-paths/iot/azure-iot/figures/50.png diff --git a/content/learning-paths/iot/azure-iot/iot-hub.md b/content/learning-paths/iot/azure-iot/iot-hub.md index f215e28989..756aa0cd85 100644 --- a/content/learning-paths/iot/azure-iot/iot-hub.md +++ b/content/learning-paths/iot/azure-iot/iot-hub.md @@ -1,6 +1,6 @@ --- # User change -title: "IoT Hub" +title: "Azure IoT Hub" weight: 3 @@ -19,19 +19,19 @@ In the following sections of this tutorial, you’ll learn how to create and con ## Create Azure IoT Hub Start by creating an Azure IoT Hub 
1. Sign in to the Azure Portal: -* Open your web browser and go to portal.azure.com. -* Sign in using your Azure account credentials. + * Open your web browser and go to https://portal.azure.com + * Sign in using your Azure account credentials. 2. Create a new Azure IoT Hub resource * On the Azure Portal home page, select “Create a resource” at the top left as shown below -![img1 alt-text#center](Figures/01.png) +![img1 alt-text#center](figures/01.png) * In the Search services and marketplace box, type “IoT Hub” and press Enter. * Click on IoT Hub from the search results: -![img2 alt-text#center](Figures/02.png) +![img2 alt-text#center](figures/02.png) 3. Click the “Create” button: -![img3 alt-text#center](Figures/03.png) +![img3 alt-text#center](figures/03.png) 4. Configure Basic IoT Hub Settings * Subscription: Select your Azure subscription. @@ -39,7 +39,7 @@ Start by creating an Azure IoT Hub * IoT Hub Name: Enter a unique name for your IoT Hub (must be globally unique, e.g., iot-hub-arm-64). * Region: Select a region closest to your location or users. * Tier: Free. This will update the daily message limit accordingly: -![img4 alt-text#center](Figures/04.png) +![img4 alt-text#center](figures/04.png) 5. Click “Next: Networking”. 6. Configure Networking: @@ -56,7 +56,7 @@ Start by creating an Azure IoT Hub * Once deployed, you’ll see a message stating “Your deployment is complete”. * Click “Go to resource” to open the newly created Azure IoT Hub. 12. Check IoT Hub Overview and Details. From the IoT Hub overview page, verify important details such as the hub name, region, status, and hostname, which you’ll use to connect devices: -![img5 alt-text#center](Figures/05.png) +![img5 alt-text#center](figures/05.png) ## Next steps -Now that your Azure IoT Hub is ready, you can proceed to register and configure your IoT devices. In the next step, you’ll learn how to register an Arm64-based IoT device and start streaming data using Python and Azure IoT SDK. \ No newline at end of file +Now that your Azure IoT Hub is ready, you can proceed to register and configure your IoT devices. In the next step, you’ll learn how to register an Arm64-based IoT device and start streaming data using Python and Azure IoT SDK. diff --git a/content/learning-paths/iot/azure-iot/monitoring.md b/content/learning-paths/iot/azure-iot/monitoring.md index 13c71fa98e..7518d1d429 100644 --- a/content/learning-paths/iot/azure-iot/monitoring.md +++ b/content/learning-paths/iot/azure-iot/monitoring.md @@ -7,15 +7,14 @@ weight: 7 layout: "learningpathall" --- -## Objective -In the previous section, you successfully configured Azure Stream Analytics to store incoming IoT telemetry data securely in Azure Cosmos DB. The stored sensor data is now readily accessible for further analysis, monitoring, and action. In this section, you’ll enhance your IoT solution by implementing real-time data monitoring and alerting capabilities using Azure Functions. +In the previous section, you successfully configured Azure Stream Analytics to store incoming IoT telemetry data securely in Azure Cosmos DB. The stored sensor data is now readily accessible for further analysis, monitoring, and action. In this section, you will enhance your IoT solution by implementing real-time data monitoring and alerting capabilities using Azure Functions. -Azure Functions is a powerful, event-driven, serverless compute service provided by Azure, enabling you to execute custom code in response to specific events or triggers without the need to manage infrastructure. 
You’ll create an Azure Function that regularly queries temperature data from Cosmos DB, evaluates sensor readings against predefined thresholds, and sends notifications when critical values are exceeded—such as detecting overheating or environmental anomalies. By adding this functionality, you’ll build proactive monitoring into your IoT pipeline, ensuring timely responses to sensor data events and improving overall operational reliability.
+Azure Functions is a powerful, event-driven, serverless compute service provided by Azure, enabling you to execute custom code in response to specific events or triggers without the need to manage infrastructure. You will create an Azure Function that regularly queries temperature data from Cosmos DB, evaluates sensor readings against predefined thresholds, and sends notifications when critical values are exceeded, such as detecting overheating or environmental anomalies. By adding this functionality, you will build proactive monitoring into your IoT pipeline, ensuring timely responses to sensor data events and improving overall operational reliability.

## Azure Functions
-Azure Functions is a serverless computing platform provided by Microsoft Azure, designed to enable developers to run event-driven code without having to provision or manage infrastructure. With Azure Functions, you can easily create small, focused applications or services that automatically respond to events, such as database updates, HTTP requests, IoT sensor data events, or scheduled tasks. Because Azure Functions is serverless, it automatically scales based on workload, providing elasticity, rapid deployment, and simplified maintenance—developers only pay for resources actually consumed during execution.
+Azure Functions is a serverless computing platform provided by Microsoft Azure, designed to enable developers to run event-driven code without having to provision or manage infrastructure. With Azure Functions, you can easily create small, focused applications or services that automatically respond to events, such as database updates, HTTP requests, IoT sensor data events, or scheduled tasks. Because Azure Functions is serverless, it automatically scales based on workload, providing elasticity, rapid deployment, and simplified maintenance; developers only pay for resources actually consumed during execution.

-In IoT scenarios, Azure Functions are particularly valuable for responding to real-time data events, such as sensor readings exceeding specific thresholds. You can integrate Azure Functions seamlessly with services like Azure Cosmos DB, Azure IoT Hub, or Azure Notification Hubs, enabling functions to trigger automatically when new data is received or when certain conditions are met. This flexibility allows you to build responsive, cost-effective, and efficient IoT applications that require minimal setup yet offer highly scalable, real-time processing capabilities.
+In IoT, Azure Functions are particularly valuable for responding to real-time data events, such as sensor readings exceeding specific thresholds. You can integrate Azure Functions seamlessly with services like Azure Cosmos DB, Azure IoT Hub, or Azure Notification Hubs, enabling functions to trigger automatically when new data is received or when certain conditions are met. This flexibility allows you to build responsive, cost-effective, and efficient IoT applications that require minimal setup yet offer highly scalable, real-time processing capabilities.
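To make this concrete before you build the full function later in this section, the sketch below shows the general shape of a Cosmos DB-triggered function in the Python V2 programming model. It is only an illustration: the database, container, and connection-setting names and the 30.0 threshold are placeholders, and the exact decorator argument names can vary slightly between versions of the `azure-functions` package and extension bundles.

```python
# Minimal sketch of a Cosmos DB-triggered function (Python V2 programming model).
# IoTDatabase, SensorReadings, CosmosDbConnectionSetting, and the threshold are
# placeholders; decorator argument names may differ between package versions.
import logging

import azure.functions as func

app = func.FunctionApp()


@app.cosmos_db_trigger(
    arg_name="documents",
    database_name="IoTDatabase",
    container_name="SensorReadings",
    connection="CosmosDbConnectionSetting",
    create_lease_container_if_not_exists=True,
)
def temperature_monitor(documents: func.DocumentList) -> None:
    # Each batch of inserted or updated documents is delivered to this function.
    for doc in documents:
        temperature = float(doc.get("temperature", 0.0))
        if temperature > 30.0:
            logging.warning(
                "High temperature %.1f reported by device %s",
                temperature,
                doc.get("deviceId"),
            )
```

The function stays idle and costs nothing until a document change activates it, which is the event-driven behavior described above.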
### Event-driven Architecture Azure Functions are inherently event-driven, meaning your code is automatically executed in direct response to specific events or triggers, without manual intervention. Each Azure Function remains dormant and consumes no resources until activated by a defined event, at which point the function is instantly executed. Common triggers include events like new data being written to Azure Cosmos DB, telemetry messages arriving in Azure IoT Hub, incoming HTTP requests, scheduled timers, or even queue-based messages. @@ -23,7 +22,7 @@ Azure Functions are inherently event-driven, meaning your code is automatically This event-driven design ensures real-time responsiveness, making Azure Functions especially well-suited for IoT scenarios, where timely reactions to incoming sensor data or critical events are crucial. For instance, an Azure Function can immediately activate upon detecting new sensor data in Cosmos DB, evaluate the data (such as checking whether temperature thresholds are exceeded), and promptly send alerts or trigger follow-up actions. ### Serverless and Scalability -Azure Functions is built on a serverless computing model, meaning you can execute custom code in response to specific events without having to provision or maintain any underlying server infrastructure. This approach enables developers to focus purely on application logic rather than spending time on managing servers, operating systems, or runtime environments. When an event—such as an HTTP request, database update, or new IoT sensor reading—occurs, Azure Functions automatically triggers your custom code, scales the necessary resources dynamically, and executes the function. +Azure Functions is built on a serverless computing model, meaning you can execute custom code in response to specific events without having to provision or maintain any underlying server infrastructure. This approach enables developers to focus purely on application logic rather than spending time on managing servers, operating systems, or runtime environments. When an event such as an HTTP request, database update, or new IoT sensor reading occurs, Azure Functions automatically triggers your custom code, scales the necessary resources dynamically, and executes the function. In the context of IoT solutions, the serverless model offered by Azure Functions is especially valuable because it can efficiently handle unpredictable workloads, scaling instantly as data volume fluctuates. Functions can scale out horizontally to accommodate spikes in IoT data without manual intervention, providing real-time responsiveness and reliability. This automatic scaling, coupled with a consumption-based billing model (paying only for resources actually consumed), makes Azure Functions an optimal choice for cost-effective, efficient, and responsive IoT monitoring, alerting, and analytics applications. @@ -36,12 +35,12 @@ Examples of triggers include: * Cosmos DB Trigger - runs whenever new data is added or updated in Cosmos DB. * IoT Hub/Event Hub Triggers - respond immediately to events like incoming IoT device messages. -In IoT scenarios, triggers tied to Cosmos DB are particularly powerful. For example, an Azure Function can automatically activate when new sensor readings are stored in Cosmos DB, allowing you to implement real-time monitoring, send immediate notifications, or perform analytics. 
Each function’s event-driven execution ensures your application remains highly responsive, efficient, and scalable—crucial for maintaining performance and cost-effectiveness in IoT solutions +In IoT scenarios, triggers tied to Cosmos DB are particularly powerful. For example, an Azure Function can automatically activate when new sensor readings are stored in Cosmos DB, allowing you to implement real-time monitoring, send immediate notifications, or perform analytics. Each function’s event-driven execution ensures your application remains highly responsive, efficient, and scalable, crucial for maintaining performance and cost-effectiveness in IoT solutions ### Azure Functions Bindings In addition to triggers, Azure Functions provide another powerful feature called bindings. Bindings allow you to effortlessly connect your functions to other Azure services or external resources, simplifying both input and output integration. Using bindings, you can directly access data from services like Azure Cosmos DB, Azure Blob Storage, Azure Queue Storage without the need to write custom integration code or manage connection logic manually. -Bindings greatly accelerate development, as you can easily read from or write to external services declaratively—just by defining simple configurations. For instance, when working with IoT solutions, you can configure a Cosmos DB input binding to automatically retrieve sensor data as documents, or set up an output binding to seamlessly persist aggregated data or alerts back to Cosmos DB or Azure Storage. This eliminates repetitive integration code, reduces complexity, and significantly enhances productivity. +Bindings greatly accelerate development, as you can easily read from or write to external services declaratively just by defining simple configurations. For instance, when working with IoT solutions, you can configure a Cosmos DB input binding to automatically retrieve sensor data as documents, or set up an output binding to seamlessly persist aggregated data or alerts back to Cosmos DB or Azure Storage. This eliminates repetitive integration code, reduces complexity, and significantly enhances productivity. Overall, Azure Function bindings simplify and speed up your development workflow, allowing you to focus entirely on your application logic rather than managing integration details @@ -50,19 +49,19 @@ When working with Azure Functions, there are two primary deployment approaches: Azure Portal provides a user-friendly interface to quickly create, configure, and manage Azure Functions directly from your web browser. It’s particularly suitable for simple scenarios or quick prototypes, as it requires minimal setup and no installation of additional tools. You can easily define triggers, bindings, environment variables, and monitor function execution directly through the portal interface. -However, there’s an important limitation for Python-based Azure Functions. If you choose Python with a Linux-based hosting plan (required for Python functions), the Azure Portal does not support in-portal code editing. In other words, while you can manage and monitor your Python functions in the portal, you can’t directly edit or modify the function’s Python source code there. +However, there’s an important limitation for Python based Azure Functions. If you choose Python with a Linux-based hosting plan (required for Python functions), the Azure Portal does not support in-portal code editing. 
In other words, while you can manage and monitor your Python functions in the portal, you can’t directly edit or modify the function’s Python source code there.
To overcome this limitation, local development is highly recommended for Python-based Azure Functions. Local development involves developing and testing your Azure Functions on your own computer using the Azure Functions Core Tools and a IDE like Visual Studio Code. After development and local testing, you deploy your function to Azure using command-line tools (func CLI), IDE integrations, or continuous integration solutions such as GitHub Actions.
-For Python functions on Linux-based plans, local development and deployment represent the best-practice approach, enabling you to efficiently create, debug, test, and manage more sophisticated IoT solutions. Therefore, in this section we will use local development.
+For Python functions on Linux-based plans, local development and deployment represent the best-practice approach, enabling you to efficiently create, debug, test, and manage more sophisticated IoT solutions. Therefore, in this section you will use local development.

## Create an Azure Function App
-We will start by creating an Azure Function App, in which we will create an Azure Function that regularly queries temperature data from Cosmos DB. In the next step, we will add the capability to send notifications, whenever the temperature reading exceeds a predefined threshold. Proceed as follows:
+You will start by creating an Azure Function App, in which you will create an Azure Function that regularly queries temperature data from Cosmos DB. In the next step, you will add the capability to send notifications whenever the temperature reading exceeds a predefined threshold. Proceed as follows:
1. Sign in to the Azure Portal.
2. Click “Create a resource”, type “Function App”, and select it:
-![img24 alt-text#center](Figures/24.png)
+![img24 alt-text#center](figures/24.png)
3. Click Create, then select Consumption as a hosting option:
-![img25 alt-text#center](Figures/25.png)
+![img25 alt-text#center](figures/25.png)
4. Provide the required details:
* Subscription: Your Azure subscription.
* Resource Group: Select your existing IoT resource group.
@@ -70,10 +69,10 @@ We will start by creating an Azure Function App, in which we will create an Azur
* Runtime Stack: Select Python.
* Version: Select 3.11
* Region: Select the same region as your Cosmos DB and IoT Hub.
-* Operating System: Linux as Windows is unavailable for Python.
+* Operating System: Select Linux, as Windows is unavailable for Python.
5. Click Review + Create, and then Create.
-![img26 alt-text#center](Figures/26.png)
+![img26 alt-text#center](figures/26.png)

## Install Prerequisites
Before writing the code make sure you have the following tools installed:
@@ -87,7 +86,7 @@ Ensure Azure Functions Core Tools are properly installed by running:
func --version
```

-The output would be like:
+The output should look like:
```output
func --version
4.0.6821
@@ -99,31 +98,31 @@ Ensure you also see a v4.x.x output, indicating compatibility with Python v2 mod
Follow these steps to create an Azure Function locally using Visual Studio Code:
1. In Visual Studio Cod, click View->Command Palette...
2. Type "Create Function":
-![img27 alt-text#center](Figures/27.png)
+![img27 alt-text#center](figures/27.png)
3. Select Azure Functions: Create Function...
-4. Select folder for your new function. For example create a new folder Arm.AzureIoT.AzureFunctions
+4. Select a folder for your new function. For example, create a new folder `Arm.AzureIoT.AzureFunctions`.
5. Visual Studio Code will display the wizard, which enables you to configure your function:
-![img28 alt-text#center](Figures/28.png)
+![img28 alt-text#center](figures/28.png)
6. Use this wizard to configure the function:
-* Select a language: pick Python
+* Select a language: Select Python
* Select a Python programming model: Model V2 (Recommended)
-* Select a Python interpreter to create a virtual environment: select python3
+* Select a Python interpreter to create a virtual environment: Select python3
* Select a template for your project's first function: CosmosDB trigger
-* Name of the function you want to create: keep default (cosmosdb_trigger)
-* Name of the container to be monitored: SensorReadings (or the one you created during Azure Cosmos DB provisioning)
-* Name of the Cosmos DB database that includes the container to be monitored: IoTDatabaee (or the one you created during Azure Cosmos DB provisioning)
-* Select the app setting with your Cosmos DB account connection string from "local.settings.json: Select + Create new local app setting
+* Name of the function you want to create: Use the default (cosmosdb_trigger)
+* Name of the container to be monitored: `SensorReadings` (or the one you created during Azure Cosmos DB provisioning)
+* Name of the Cosmos DB database that includes the container to be monitored: `IoTDatabase` (or the one you created during Azure Cosmos DB provisioning)
+* Select the app setting with your Cosmos DB account connection string from "local.settings.json": Select + Create new local app setting
* Select your Azure subscription, and then select a database account (armiotcosmosdb or the one you used during Azure Cosmos DB provisioning)
* Select how you would like to open your project: Open in current window

Visual Studio Code will create the following files:
-* function_app.py - this primary function code file. In the Azure Functions Python V2 programming model, bindings and triggers are defined using Python decorators directly within this file
-* local.settings.json - this file is specifically used for local development, storing connection strings, app settings, and environment variables. It's not deployed to Azure, so it's safe to include sensitive data (like connection strings) locally for testing purpose.
-* host.json - defines global configuration settings that affect the behavior of your entire Azure Functions application. Examples include function timeout limits, logging levels, and concurrency settings
-* requirements.txt - this file lists all Python packages and dependencies required by your function app. Azure Functions uses this file to automatically install the necessary Python packages when deploying the function to Azure
+* `function_app.py` - the primary function code file. In the Azure Functions Python V2 programming model, bindings and triggers are defined using Python decorators directly within this file
+* `local.settings.json` - this file is specifically used for local development, storing connection strings, app settings, and environment variables. It’s not deployed to Azure, so it’s safe to include sensitive data (like connection strings) locally for testing purposes.
+* `host.json` - defines global configuration settings that affect the behavior of your entire Azure Functions application.
Examples include function timeout limits, logging levels, and concurrency settings +* `requirements.txt` - this file lists all Python packages and dependencies required by your function app. Azure Functions uses this file to automatically install the necessary Python packages when deploying the function to Azure ### Modify Function Code -You will now modify the function code to check whether the temperature of the new sensor readings is above a threshold. To do so, open the function_app.py and modify it as follows: +You will now modify the function code to check whether the temperature of the new sensor readings is above a threshold. To do so, open the `function_app.py` and modify it as follows: ```Python import azure.functions as func @@ -172,40 +171,41 @@ To run your function locally proceed as follows: func start ``` -3. Run the iot_simulator.py in a separate terminal window. +3. Run the `iot_simulator.py` in a separate terminal window. You should then see the following logs, depending on the generated temperature values: -![img29 alt-text#center](Figures/29.png) +![img29 alt-text#center](figures/29.png) ## Monitoring and Notifications -In this section, you'll extend your existing Azure Function to send email notifications using SendGrid whenever the temperature exceeds the defined threshold +In this section, you will extend your existing Azure Function to send email notifications using `SendGrid` whenever the temperature exceeds the defined threshold ### Create a SendGrid Account in Azure Follow these steps to create a SendGrid account: 1. Sign in to the Azure Portal. 2. Click “Create a resource” and search for SendGrid. -![img30 alt-text#center](Figures/30.png) +![img30 alt-text#center](figures/30.png) 3. Select Twilio SendGrid, choose the Free 100 (2022) plan, and then click Subscribe. 4. Provide the following details: * Subscription: Select your Azure subscription. * Resource group: Choose your existing IoT project resource group. * Name: Enter a descriptive name (e.g., iot-alerts-sendgrid). -![img31 alt-text#center](Figures/31.png) +![img31 alt-text#center](figures/31.png) 5. Click Review + subscribe and then Subscribe. 6. On the next screen, click Configure account now: -![img32 alt-text#center](Figures/32.png) +![img32 alt-text#center](figures/32.png) 7. Accept any permissions required by SendGrid, and then enter your details to create a sender identity: -![img33 alt-text#center](Figures/33.png) +![img33 alt-text#center](figures/33.png) 8. Fill out the required details, such as your name and email address. 9. After the sender identity is verified, click API Keys in the left menu: -![img34 alt-text#center](Figures/34.png) +![img34 alt-text#center](figures/34.png) 10. Click Create API Key. In the popup window, enter a key name (e.g., iot-api-key), and select Full Access. -![img35 alt-text#center](Figures/35.png) +![img35 alt-text#center](figures/35.png) 11. Copy the generated API key securely. You will not be able to retrieve it later. 
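Before wiring the key into your function, you can optionally confirm that it works with a short standalone script. This is only a sanity check: the addresses below are placeholders, the `sendgrid` package is installed in a later step, and the script assumes the key is exported as the `SENDGRID_API_KEY` environment variable.

```python
# Optional sanity check: send a one-off test email with the new key.
# Requires the sendgrid package (pip install sendgrid); addresses are placeholders.
import os

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

message = Mail(
    from_email="alerts@example.com",   # must match your verified sender identity
    to_emails="you@example.com",
    subject="SendGrid test from the IoT learning path",
    plain_text_content="If you can read this, the API key and sender identity work.",
)

client = SendGridAPIClient(os.environ["SENDGRID_API_KEY"])
response = client.send(message)
print(response.status_code)  # 202 means SendGrid accepted the message for delivery
```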
### Configure SendGrid API Key in your Azure Function
-Update your local.settings.json file to include the SendGrid API key:
+Update your `local.settings.json` file to include the SendGrid API key as shown:
+```console
{
  "IsEncrypted": false,
  "Values": {
@@ -215,11 +215,11 @@ Update your local.settings.json file to include the SendGrid API key:
    "SENDGRID_API_KEY": ""
  }
}
-
-Replace with the actual key obtained earlier
+```
+Replace the empty `SENDGRID_API_KEY` value with the actual API key obtained earlier.

### Install SendGrid Python Library
-Update your project's requirements.txt to include the SendGrid Python SDK:
+Update your project's `requirements.txt` to include the SendGrid Python SDK:
```console
azure-functions
sendgrid
@@ -231,7 +231,7 @@ pip install -r requirements.txt
```

### Extend Your Azure Function to Send Email Notifications
-Modify your existing function_app.py file as follows:
+Modify your existing `function_app.py` file as follows:
```python
import azure.functions as func
import logging
@@ -286,14 +286,13 @@ def cosmosdb_trigger(azcosmosdb: func.DocumentList):
    )
```

-The send_email_alert function is responsible for sending an email notification through SendGrid whenever a sensor reading exceeds the specified temperature threshold. It constructs an email message using details about the IoT device, including the device_id, current temperature, and the event timestamp. The function utilizes SendGrid’s Python SDK (SendGridAPIClient) to send the email message. If the email is successfully sent, it logs a confirmation with the status code. If the email fails, it captures and logs the error details, ensuring that any issues with email delivery can be easily identified and resolved. This function enables proactive monitoring by immediately alerting the user when potentially critical temperature conditions are detected, significantly enhancing the reliability and responsiveness of the IoT system
+The `send_email_alert` function is responsible for sending an email notification through SendGrid whenever a sensor reading exceeds the specified temperature threshold. It constructs an email message using details about the IoT device, including the `device_id`, current temperature, and the event timestamp. The function utilizes SendGrid’s Python SDK (`SendGridAPIClient`) to send the email message. If the email is successfully sent, it logs a confirmation with the status code. If the email fails, it captures and logs the error details, ensuring that any issues with email delivery can be easily identified and resolved. This function enables proactive monitoring by immediately alerting the user when potentially critical temperature conditions are detected, significantly enhancing the reliability and responsiveness of the IoT system.

Now, start your function:
```console
func start
```
-Then run the iot_simulator.py to send telemetry data, and wait for alert notifications.
+Then run the `iot_simulator.py` to send telemetry data, and wait for alert notifications.

-## Summary and Next Steps
-You have now successfully configured real-time monitoring with email notifications. You can proceed to enhance your IoT solution by aggregating data and creating dashboards or visualizations.
\ No newline at end of file
+You have now successfully configured real-time monitoring with email notifications. You can proceed to enhance your IoT solution by aggregating data and creating dashboards or visualizations.
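Because the full listing of the updated `function_app.py` is abbreviated above, here is a hedged sketch of what a helper along the lines of the `send_email_alert` function described above might look like. The sender and recipient addresses are placeholders, and your actual implementation may differ in detail.

```python
# Illustrative sketch of an email-alert helper like the one described above.
# Addresses are placeholders; your function_app.py may differ in detail.
import logging
import os

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail


def send_email_alert(device_id: str, temperature: float, timestamp: str) -> None:
    message = Mail(
        from_email="alerts@example.com",   # verified sender identity
        to_emails="you@example.com",
        subject=f"Temperature alert for {device_id}",
        plain_text_content=(
            f"Device {device_id} reported {temperature:.1f} C at {timestamp}, "
            "which exceeds the configured threshold."
        ),
    )
    try:
        client = SendGridAPIClient(os.environ["SENDGRID_API_KEY"])
        response = client.send(message)
        logging.info("Alert email accepted by SendGrid (status %s)", response.status_code)
    except Exception as exc:
        logging.error("Failed to send alert email: %s", exc)
```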
diff --git a/content/learning-paths/iot/azure-iot/portal.md b/content/learning-paths/iot/azure-iot/portal.md
index 9db8dbcc38..305855ca0e 100644
--- a/content/learning-paths/iot/azure-iot/portal.md
+++ b/content/learning-paths/iot/azure-iot/portal.md
@@ -8,13 +8,19 @@ layout: "learningpathall"
---

## Objective
-We have successfully established the core backend components for our IoT solution. An IoT simulator continuously generates sensor data, streaming it securely to the cloud via Azure IoT Hub. These sensor readings are stored in Cosmos DB, ensuring data persistence and scalability. Additionally, we’ve implemented an Azure Function that can be triggered through HTTP requests to query Cosmos DB and calculate the average temperature from recent sensor data. With these underlying services fully operational, we’re now prepared to build a WWW portal that will visually present real-time temperature information to our end-users.
+You have successfully established the core backend components for your IoT solution. An IoT simulator continuously generates sensor data, streaming it securely to the cloud via Azure IoT Hub. These sensor readings are stored in Cosmos DB, ensuring data persistence and scalability. Additionally, you have implemented an Azure Function that can be triggered through HTTP requests to query Cosmos DB and calculate the average temperature from recent sensor data. With these underlying services fully operational, you can now build a web portal that will visually present real-time temperature information to end users.

## Website
-Start by creating a new folder named Arm.AzureIoT.Portal, inside which you’ll create three files: index.html, main.js, and styles.css. The first file, index.html will define the structure of the webpage, and contain the HTML markup and links to the JavaScript and CSS files. The second file, main.js will include the logic and interactivity of the webpage. In this project, it will handle fetching temperature data from your Azure Function and updating the displayed content dynamically. The last file, styles.css will contain all the styling information, controlling the visual appearance of your webpage.
+Start by creating a new folder named `Arm.AzureIoT.Portal`, inside which you will create three files: `index.html`, `main.js`, and `styles.css`.
+
+* `index.html` will define the structure of the webpage, and contain the HTML markup and links to the JavaScript and CSS files.
+
+* `main.js` will include the logic and interactivity of the webpage. In this project, it will handle fetching temperature data from your Azure Function and updating the displayed content dynamically.
+
+* `styles.css` will contain all the styling information, controlling the visual appearance of your webpage.

### styles.css
-Modify your styles.css file by adding the following CSS:
+Modify your `styles.css` file by adding the following CSS:
```css
body, html {
  margin: 0;
@@ -57,15 +63,16 @@ body, html {
}
```

-The provided CSS sets a modern, dark-themed appearance for your IoT portal webpage. Here’s a breakdown of its styling:
-* body and html. The styles remove default margins and paddings, define a dark background color (#121212), set the text color to white for high contrast, and center content both horizontally and vertically using Flexbox.
-* .container - this creates a central container element with padding for spacing, rounded corners (border-radius: 8px) for a softer look, a subtle shadow effect for depth, and a slightly lighter dark background (#1e1e1e) to distinguish the content area from the main page background. -* h1 - this defines the main title style with increased font size (2.5rem) and additional spacing below to clearly separate the title from other content. -* button - styles the interactive “Get Temperature” button, giving it a blue color (#1e88e5), white text for readability, rounded corners for a friendly appearance, and smooth color-transition effects when hovered to improve user experience. -* .result - formats the text area where the temperature reading will appear, adding sufficient margin for clear spacing and slightly larger text size to make the results easily readable. +The provided CSS sets a modern, dark-themed appearance for your IoT portal webpage. Here is a breakdown of its styling: + + * body and html. The styles remove default margins and paddings, define a dark background color (#121212), set the text color to white for high contrast, and center content both horizontally and vertically using Flexbox. + * `.container` - this creates a central container element with padding for spacing, rounded corners (border-radius: 8px) for a softer look, a subtle shadow effect for depth, and a slightly lighter dark background (#1e1e1e) to distinguish the content area from the main page background. + * `h1` - this defines the main title style with increased font size (2.5rem) and additional spacing below to clearly separate the title from other content. + * `button` - styles the interactive “Get Temperature” button, giving it a blue color (#1e88e5), white text for readability, rounded corners for a friendly appearance, and smooth color-transition effects when hovered to improve user experience. + * `.result` - formats the text area where the temperature reading will appear, adding sufficient margin for clear spacing and slightly larger text size to make the results easily readable. ### main.js -Now, open your main.js file and update it with the following JavaScript code: +Now, open your `main.js` file and update it with the following JavaScript code: ```JavaScript const functionUrl = ""; @@ -125,59 +132,59 @@ In the body section we have: * a button (id="getTempBtn") that users click to trigger the JavaScript logic retrieving temperature data from your Azure Function. * a placeholder label (div) with the id "result" initially showing "Temperature: -- °C". The JavaScript updates this label dynamically with the actual temperature retrieved from your backend. -Finally, the index.html includes the JavaScript file (main.js) placed at the end of the body to ensure the HTML elements are fully loaded before executing scripts. +Finally, the `index.html` includes the JavaScript file (main.js) placed at the end of the body to ensure the HTML elements are fully loaded before executing scripts. ## Testing the Implementation: -Make sure you’ve saved all three files (index.html, main.js, and styles.css). Next: +Make sure you have saved all three files (index.html, main.js, and styles.css). Next: 1. Start the IoT Simulator to begin streaming data to the Azure IoT Hub. 2. Open the index.html file locally in your web browser. 3. Click the "Get temperature" button. 
You should now see real-time temperature readings displayed: -![img44 alt-text#center](Figures/44.png) +![img44 alt-text#center](figures/44.png) ## Deployment to Azure Blob Storage -You will now deploy the web portal you’ve created to Azure Blob Storage, making it accessible online. +You will now deploy the web portal you have created to Azure Blob Storage, making it accessible online. ### Create and Configure Azure Blob Storage 1. Sign in to the Azure Portal. 2. Create a Storage Account: * Click “Create a resource” * Search for “Storage account” -![img45 alt-text#center](Figures/45.png) +![img45 alt-text#center](figures/45.png) * Click “Create”. -![img46 alt-text#center](Figures/46.png) +![img46 alt-text#center](figures/46.png) 3. Provide required details: * Subscription, resource group, storage account name (e.g. armiotstorage). * For Primary service, choose Azure Blob Storage or Azure Data Lake Storage Gen 2. * Select Standard performance and Locally-redundant storage (LRS). -![img47 alt-text#center](Figures/47.png) +![img47 alt-text#center](figures/47.png) * Click "Review + create", then "Create". 3. Enable Static Website Hosting: * Navigate to your newly created storage account. * Under Data management, click “Static website”. * Select “Enabled”. * Set index.html as the index document name. -![img48 alt-text#center](Figures/48.png) +![img48 alt-text#center](figures/48.png) * Click Save. After saving, Azure provides you with a URL like: https://.z22.web.core.windows.net/. Make sure to save this URL, as it will serve as the public endpoint for your website. ### Upload Files to Azure Blob Storage -You can upload your website files directly using the Azure Portal or via Azure Storage Explorer. Here, we’ll use the Azure Portal: +You can upload your website files directly using the Azure Portal or via Azure Storage Explorer. Here, you will use the Azure Portal: 1. Navigate to your storage account. 2. Under Data storage, select “Containers”. 3. Open the container named ”$web” (created automatically when enabling static websites). 4. Click Upload and select your three website files (index.html, main.js, styles.css), and upload them. -![img49 alt-text#center](Figures/49.png) +![img49 alt-text#center](figures/49.png) ### Verify the Deployment After uploading your files, open a browser and navigate to https://.z22.web.core.windows.net/. Your static website should load, allowing you to test the “Get temperature” button (to see temperatures make sure to start the IoT simulator): -![img50 alt-text#center](Figures/50.png) +![img50 alt-text#center](figures/50.png) ## Summary -In this learning path, we successfully built a complete, end-to-end prototype of an IoT solution. Our journey began with a simulator streaming realistic telemetry data to Azure through IoT Hub. We leveraged Azure Stream Analytics to process and route this streaming data directly into Cosmos DB, providing scalable and reliable storage. Additionally, we developed two Azure Functions: the first continuously monitors incoming temperature readings, sending email notifications whenever the temperature exceeds a predefined threshold, ensuring proactive alerts. The second Azure Function aggregates recent temperature data from the last minute and provides this information via an HTTP endpoint. Finally, we utilized this aggregation function within our user-friendly web portal, enabling real-time visualization of temperature data, thus completing our robust and interactive IoT solution. 
+In this learning path, you successfully built a complete, end-to-end prototype of an IoT solution. You started with a simulator streaming realistic telemetry data to Azure through IoT Hub. You used Azure Stream Analytics to process and route this streaming data directly into Cosmos DB, providing scalable and reliable storage. Additionally, you developed two Azure Functions: the first continuously monitors incoming temperature readings, sending email notifications whenever the temperature exceeds a predefined threshold, ensuring proactive alerts. The second Azure Function aggregates recent temperature data from the last minute and provides this information via an HTTP endpoint. Finally, you utilized this aggregation function within your user-friendly web portal, enabling real-time visualization of temperature data, thus building out a complete IoT solution.
diff --git a/content/learning-paths/iot/azure-iot/stream-analytics-dynamo-db.md b/content/learning-paths/iot/azure-iot/stream-analytics-dynamo-db.md
index 6672e3d3b0..e6c1c6c8c3 100644
--- a/content/learning-paths/iot/azure-iot/stream-analytics-dynamo-db.md
+++ b/content/learning-paths/iot/azure-iot/stream-analytics-dynamo-db.md
@@ -7,20 +7,19 @@ weight: 6
layout: "learningpathall"
---

-## Objective
-In the previous section, you successfully set up an Azure Stream Analytics job and configured Azure IoT Hub as an input source. You implemented a simple query to stream real-time sensor data directly from IoT Hub, establishing a seamless flow of telemetry data into Azure. Now, you’ll take the next step to persist this streaming data in Azure Cosmos DB.
+In the previous section, you successfully set up an Azure Stream Analytics job and configured Azure IoT Hub as an input source. You implemented a simple query to stream real-time sensor data directly from IoT Hub, establishing a seamless flow of telemetry data into Azure. Now, you will take the next step to persist this streaming data in Azure Cosmos DB.

## Azure Cosmos DB
Azure Cosmos DB is a fully managed, globally distributed NoSQL database service designed for scalability, reliability, and high availability. Its flexible data schema allows for easy storage of diverse data types from multiple IoT devices without requiring rigid schema definitions. This schema flexibility is especially valuable in IoT scenarios, where sensors and devices may send varied or evolving data structures.

-Consider a scenario where your existing IoT devices stream telemetry data (temperature, pressure, humidity) to Azure Cosmos DB via Azure Stream Analytics. Now imagine that you need to integrate a new sensor type—such as an air-quality sensor that provides an additional measurement (e.g., “AirQualityIndex”).
+Consider a scenario where your existing IoT devices stream telemetry data (temperature, pressure, humidity) to Azure Cosmos DB via Azure Stream Analytics. Now imagine that you need to integrate a new sensor type, such as an air-quality sensor that provides an additional measurement (example: AirQualityIndex).

-With Azure Cosmos DB’s NoSQL architecture, you don’t have to explicitly modify or migrate database schemas when introducing new data fields. The new sensor data can simply be included in your Stream Analytics query, and Cosmos DB will automatically store the additional field alongside existing data entries without any extra setup.
+With Azure Cosmos DB’s NoSQL architecture, you do not have to explicitly modify or migrate database schemas when introducing new data fields.
The new sensor data can simply be included in your Stream Analytics query, and Cosmos DB will automatically store the additional field alongside existing data entries without any extra setup. ### Partitioning Azure Cosmos DB uses partitioning for efficiently managing large-scale data and high-throughput operations. Partitioning distributes data across multiple servers (physical partitions), allowing Cosmos DB to scale seamlessly as data volume and query demands grow. -Cosmos DB uses a partition key—a field chosen by the user—to determine how data is distributed across partitions. The partition key should ideally have a high cardinality (many distinct values) and should evenly distribute read and write workloads. Each unique value of the partition key corresponds to a logical grouping called a logical partition. Documents with the same partition key value reside within the same logical partition. +Cosmos DB uses a partition key, a field chosen by the user, to determine how data is distributed across partitions. The partition key should ideally have a high cardinality (many distinct values) and should evenly distribute read and write workloads. Each unique value of the partition key corresponds to a logical grouping called a logical partition. Documents with the same partition key value reside within the same logical partition. When you select a suitable partition key, Cosmos DB ensures that operations (reads and writes) targeting a specific logical partition perform efficiently because queries can quickly locate and retrieve data without scanning the entire dataset. If your data and workload scale significantly, Cosmos DB transparently manages splitting and distributing data across additional physical partitions automatically. Therefore, selecting an effective partition key (such as deviceId for IoT scenarios) can greatly optimize performance, scalability, and cost efficiency. @@ -31,12 +30,12 @@ In Azure Cosmos DB, scaling relies on partitions, allowing the database to handl When data volume or throughput requirements grow beyond the capacity of a single logical partition, Cosmos DB transparently distributes these logical partitions across multiple physical partitions (servers). As data volume and workload increase, Cosmos DB dynamically creates additional physical partitions, automatically redistributing your logical partitions across them. This horizontal scaling ensures that read and write operations remain fast and efficient, even as the database size and traffic significantly grow. -Efficient scaling is directly linked to choosing a suitable partition key. Selecting a good partition key ensures that your data and workload are evenly balanced across physical partitions. Good partitioning prevents hotspots—scenarios where a single partition disproportionately handles more workload than others—which can lead to performance bottlenecks. Thus, careful selection of partition keys (such as device ID in IoT scenarios) allows Cosmos DB to scale smoothly and maintain high performance and reliability, regardless of how much your data or traffic grows. +Efficient scaling is directly linked to choosing a suitable partition key. Selecting a good partition key ensures that your data and workload are evenly balanced across physical partitions. Good partitioning prevents hotspot scenarios where a single partition disproportionately handles more workload than others, which can lead to performance bottlenecks. 
Thus, careful selection of partition keys (such as device ID in IoT scenarios) allows Cosmos DB to scale smoothly and maintain high performance and reliability, regardless of how much your data or traffic grows. ### Data Operations -In Azure Cosmos DB, all data operations—such as retrieval, update, insert, and delete—consume units of measure called Request Units (RUs). Request Units represent the resources required to perform operations such as reads, writes, queries, and updates. Each operation consumes a certain number of RUs, and Cosmos DB uses this concept to provide predictable and consistent performance. +In Azure Cosmos DB, all data operations, such as retrieval, update, insert, and delete, consume units of measure called Request Units (RUs). Request Units represent the resources required to perform operations such as reads, writes, queries, and updates. Each operation consumes a certain number of RUs, and Cosmos DB uses this concept to provide predictable and consistent performance. -When you perform a data operation in Cosmos DB, the request is routed to the appropriate partition based on the partition key provided. If you’re retrieving or updating a single document by specifying its unique id and partition key value, Cosmos DB efficiently locates the data within a single logical partition, resulting in minimal RU consumption. However, cross-partition queries—queries spanning multiple logical partitions—consume more RUs because Cosmos DB must query multiple partitions simultaneously. +When you perform a data operation in Cosmos DB, the request is routed to the appropriate partition based on the partition key provided. If you are retrieving or updating a single document by specifying its unique id and partition key value, Cosmos DB efficiently locates the data within a single logical partition, resulting in minimal RU consumption. However, cross-partition queries, which are queries spanning multiple logical partitions, consume more RUs because Cosmos DB must query multiple partitions simultaneously. As your application workload increases, Cosmos DB manages scalability through partitions. Increasing throughput (i.e., RU/s) allocates more resources to your container, automatically distributing the load across existing or newly created physical partitions. Conversely, if your workload decreases, you can scale down to reduce costs. Since Cosmos DB automatically handles partition management behind the scenes, data remains available and responsive with minimal intervention. @@ -52,11 +51,11 @@ You will now configure the stream analytics job such that the telemetry data wil Start by creating the Create Cosmos DB account and database: 1. Log in to the Azure Portal. 2. Select “Create a resource”, search for “Azure Cosmos DB”, and click Create: -![img14 alt-text#center](Figures/14.png) -![img15 alt-text#center](Figures/15.png) +![img14 alt-text#center](figures/14.png) +![img15 alt-text#center](figures/15.png) 3. Select Azure Cosmos DB for NoSQL, then click Create. -![img16 alt-text#center](Figures/16.png) +![img16 alt-text#center](figures/16.png) 4. Fill in the required details: * Subscription: Select your subscription. @@ -67,7 +66,7 @@ Start by creating the Create Cosmos DB account and database: * Select Servleress as capacity mode. * Apply Free Tier Discount: Apply * Check Limit total account throughput. -![img17 alt-text#center](Figures/17.png) +![img17 alt-text#center](figures/17.png) 5. Click Review + create, then click Create. 
Once the deployment completes:
@@ -76,19 +75,19 @@ Once the deployment completes:
* Select an appropriate partition key (recommended: /deviceId).
* Enable analytical store capability to perform near real-time analytics on your operational data, without impacting the performance of transactional workloads: Off.
* Click OK at the bottom.
-![img18 alt-text#center](Figures/18.png)
+![img18 alt-text#center](figures/18.png)

### Modify Stream Analytics Job
Now update your query in Stream Analytics to write data from IoT Hub directly into Cosmos DB:
-1. Go to IoTStreamAnalyticsJob.
+1. Go to `IoTStreamAnalyticsJob`.
2. Under Job topology, select Outputs.
3. Click Add output, and select Cosmos DB:
-![img19 alt-text#center](Figures/19.png)
+![img19 alt-text#center](figures/19.png)
4. In the Cosmos DB pane, type CosmosDBOutput for the alias name, leave other fields at their default values, and click the Save button:
-![img20 alt-text#center](Figures/20.png)
+![img20 alt-text#center](figures/20.png)

### Update Your Stream Analytics Query
-Now once, you have the output configured, modify the query. To do so, select Query under Job topology. Then, modify your existing query to explicitly specify your Cosmos DB output alias:
+Now that you have the output configured, modify the query. To do so, select Query under Job topology. Then, modify your existing query to explicitly specify your Cosmos DB output alias:

```SQL
SELECT
@@ -103,16 +102,16 @@ FROM
    IoTHubInput
```

-![img21 alt-text#center](Figures/21.png)
+![img21 alt-text#center](figures/21.png)

Afterwards, click Start job, and then Start:
-![img22 alt-text#center](Figures/22.png)
+![img22 alt-text#center](figures/22.png)

## Verify data flow in Cosmos DB
-To verify that your data pipeline is working correctly, first start your Python IoT simulator application (iot_simulator.py). Ensure it’s actively sending telemetry data. Next, open the Azure Portal and navigate to your Azure Cosmos DB resource. Under Data Explorer, select your database and then your container (e.g., SensorReadings). Once selected, click Items to view your stored data. Sensor readings streamed from your IoT device will appear on the right-hand side of the Data Explorer interface, similar to the screenshot below:
+To verify that your data pipeline is working correctly, first start your Python IoT simulator application `iot_simulator.py`. Ensure it’s actively sending telemetry data. Next, open the Azure Portal and navigate to your Azure Cosmos DB resource. Under Data Explorer, select your database and then your container (e.g., SensorReadings). Once selected, click Items to view your stored data. Sensor readings streamed from your IoT device will appear on the right-hand side of the Data Explorer interface, similar to the screenshot below:

-![img23 alt-text#center](Figures/23.png)
+![img23 alt-text#center](figures/23.png)

Azure Cosmos DB stores data as JSON documents within a NoSQL (document-based) structure, making it ideal for flexible and dynamic data, such as IoT telemetry. Each record (also called a document) is stored in a container (or collection) that doesn’t enforce a rigid schema. As a result, each document can contain different fields without requiring schema changes or migrations, which is particularly valuable when collecting data from diverse IoT devices with evolving attributes.
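As a complement to the Data Explorer check, you can also read the stored documents back programmatically. The sketch below uses the `azure-cosmos` Python SDK, which is not part of this learning path's requirements; the endpoint, key, and device ID are placeholders you would take from your own Cosmos DB account and simulator.

```python
# Optional read-back of recent readings for one device using the azure-cosmos SDK
# (pip install azure-cosmos). Endpoint, key, and device ID are placeholders.
from azure.cosmos import CosmosClient

client = CosmosClient(
    "https://<your-cosmos-account>.documents.azure.com:443/",
    credential="<your-primary-key>",
)
container = client.get_database_client("IoTDatabase").get_container_client("SensorReadings")

# Scoping the query to a single partition key value keeps RU consumption low.
query = "SELECT TOP 10 c.deviceId, c.temperature, c.timestamp FROM c ORDER BY c._ts DESC"
for item in container.query_items(query=query, partition_key="<your-device-id>"):
    print(item)
```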
@@ -144,7 +143,5 @@ Our defined fields (deviceId, temperature, pressure, humidity, and timestamp) co When storing data, Cosmos DB uses the provided partition key (such as deviceId) to evenly distribute documents across logical partitions. This optimizes retrieval speed and scalability, especially in IoT scenarios where queries often target specific devices. This approach helps ensure efficient data management and high performance, even as your data volume and throughput requirements increase. -## Summary and Next Steps -In this section, you successfully configured your Azure Stream Analytics job to persist real-time sensor data from Azure IoT Hub into Azure Cosmos DB. You defined and executed a query that captures and stores streaming telemetry in a flexible, schema-free JSON document structure, taking advantage of Cosmos DB’s powerful and dynamic data-storage capabilities. By completing these steps, you’ve built a data pipeline that streams sensor readings from your simulated IoT devices directly into a scalable, highly performant NoSQL database. You’re now prepared to extend your solution further, enabling data analytics, alerting, monitoring, and visualization. -Now that you’ve successfully configured Azure Stream Analytics to store telemetry data in Azure Cosmos DB, the next step is to implement data monitoring and alerting. You’ll use Azure Functions, serverless compute service, to automatically monitor stored sensor data. Specifically, you’ll create an Azure Function that periodically reads temperature values from Cosmos DB, evaluates them against defined thresholds, and sends notifications whenever these thresholds are exceeded. +Now that you have successfully configured Azure Stream Analytics to store telemetry data in Azure Cosmos DB, the next step is to implement data monitoring and alerting. You will use Azure Functions, serverless compute service, to automatically monitor stored sensor data. Specifically, you will create an Azure Function that periodically reads temperature values from Cosmos DB, evaluates them against defined thresholds, and sends notifications whenever these thresholds are exceeded. diff --git a/content/learning-paths/iot/azure-iot/stream-analytics.md b/content/learning-paths/iot/azure-iot/stream-analytics.md index a25fb69c97..c650f43b1d 100644 --- a/content/learning-paths/iot/azure-iot/stream-analytics.md +++ b/content/learning-paths/iot/azure-iot/stream-analytics.md @@ -8,7 +8,7 @@ layout: "learningpathall" --- ## Objective -In the previous step, you successfully established a Python application to stream real-time telemetry data from an Arm64-powered IoT device directly to Azure IoT Hub. Now, you’ll leverage Azure Stream Analytics, a powerful, real-time analytics and complex-event processing engine, to process and route the streaming sensor data. Stream Analytics allows you to easily analyze incoming data streams, run queries in real-time, and seamlessly integrate the processed data into other Azure services. With Stream Analytics, you’ll define custom queries to view or transform sensor readings, such as temperature, pressure, humidity, and timestamps, and efficiently direct this information to storage, analytics services, or visualization tools. In this section, you’ll set up Stream Analytics to ingest telemetry data from IoT Hub, and run continuous queries to process this data. +In the previous step, you successfully established a Python application to stream real-time telemetry data from an Arm64 powered IoT device directly to Azure IoT Hub. 
Now, you will leverage Azure Stream Analytics, a powerful, real-time analytics and complex-event processing engine, to process and route the streaming sensor data. Stream Analytics allows you to easily analyze incoming data streams, run queries in real-time, and seamlessly integrate the processed data into other Azure services. With Stream Analytics, you will define custom queries to view or transform sensor readings, such as temperature, pressure, humidity, and timestamps, and efficiently direct this information to storage, analytics services, or visualization tools. In this section, you will set up Stream Analytics to ingest telemetry data from IoT Hub, and run continuous queries to process this data. ## Azure Stream Analytics Azure Stream Analytics is a real-time analytics and event-processing service provided by Microsoft Azure, specifically designed to handle large volumes of streaming data from IoT devices, applications, sensors, and other real-time sources. It enables developers and data engineers to create sophisticated analytics pipelines without the complexity of managing infrastructure. By processing data on the fly, Azure Stream Analytics helps users extract immediate insights, detect patterns, trigger alerts, and feed processed information into other Azure services like Azure Cosmos DB, Azure Functions, or Power BI dashboards. @@ -18,11 +18,11 @@ With Azure Stream Analytics, you can write queries using a simple SQL-like synta Azure Stream Analytics organizes real-time data processing through four main architectural components: Jobs, Inputs, Queries, and Outputs. A Job in Azure Stream Analytics serves as a logical container that encapsulates all aspects of your stream-processing workflow. Each job manages streaming data from start to finish and can be independently started, stopped, or scaled as needed. Within a job, Inputs define the sources of streaming data, typically coming from services like Azure IoT Hub, Event Hubs, or Blob Storage. Queries, written in a familiar SQL-like syntax, specify how the incoming data should be filtered, aggregated, or transformed in real-time. Finally, the processed data flows into Outputs, which can include Azure Cosmos DB, Blob Storage, Azure SQL Database, Azure Functions, or Power BI, making it readily available for further analysis, monitoring, or visualization. ## Create a Stream Analytics Job -To process and analyze the telemetry data we’re streaming to Azure IoT Hub, we’ll first create an Azure Stream Analytics job. Follow these steps to set it up: +To process and analyze the telemetry data you are streaming to Azure IoT Hub, you will first create an Azure Stream Analytics job. Follow these steps to set it up: 1. Sign in to the Azure Portal. 2. Click “Create a resource”, type “Stream Analytics job” into the search box, and press Enter. 3. From the search results, select Stream Analytics job, then click Create: -![img9 alt-text#center](Figures/09.png) +![img9 alt-text#center](figures/09.png) 4. Provide the necessary information: * Subscription: Choose the Azure subscription you want to use for this job. * Resource group: Select the resource group you previously created (e.g., your IoT project’s resource group). @@ -31,32 +31,32 @@ To process and analyze the telemetry data we’re streaming to Azure IoT Hub, we * Hosting environment: Select Cloud for Azure-managed infrastructure. * Streaming units: Set this to 1 (appropriate for initial testing and smaller workloads, you can scale up later). 
-![img10 alt-text#center](Figures/10.png) +![img10 alt-text#center](figures/10.png) 5. After reviewing your settings carefully, click Review + create, confirm that all details are correct, and then click Create to deploy your Azure Stream Analytics job. Your Stream Analytics job will deploy within a few minutes. Once the deployment is complete, you’ll be ready to configure inputs, define queries, and set outputs for real-time data processing and analytics. ## Configure Azure IoT Hub as an Input for Stream Analytics Job -After successfully creating the Stream Analytics job, you’ll need to configure your Azure IoT Hub as an input source. This configuration allows Stream Analytics to read real-time telemetry data directly from your IoT devices. Follow these steps: +After successfully creating the Stream Analytics job, you will need to configure your Azure IoT Hub as an input source. This configuration allows Stream Analytics to read real-time telemetry data directly from your IoT devices. Follow these steps: 1. Navigate to your newly created Stream Analytics job in the Azure Portal. 2. In the left-hand menu, under the Job topology section, select Inputs. 3. Click “Add input”, and choose “IoT Hub” as the input type. -![img11 alt-text#center](Figures/11.png) +![img11 alt-text#center](figures/11.png) 4. Enter the following configuration details: -* Input Alias: Provide a name, such as IoTHubInput. +* Input Alias: Provide a name, such as `IoTHubInput`. * IoT Hub: Select the Azure IoT Hub you created earlier. -* Consumer group: Choose $Default, unless you’ve explicitly created a custom consumer group. -* Shared access policy name: Select iothubowner (provides full access for reading data from IoT Hub). +* Consumer group: Choose `$Default`, unless you have explicitly created a custom consumer group. +* Shared access policy name: Select `iothubowner` (provides full access for reading data from IoT Hub). * Endpoint: Select Messaging. -* Partition key: Type deviceId (this helps ensure the data streams are partitioned by device identifiers). -* Event serialization format: Select JSON, as our telemetry data is transmitted in JSON format. -* Encoding: Choose UTF-8. +* Partition key: Type `deviceId` (this helps ensure the data streams are partitioned by device identifiers). +* Event serialization format: Select `JSON`, as our telemetry data is transmitted in `JSON` format. +* Encoding: Choose `UTF-8`. * Event compression type: Set this to None. -![img12 alt-text#center](Figures/12.png) +![img12 alt-text#center](figures/12.png) -4. After entering these details, carefully verify them for accuracy and completeness. Click “Save” to apply the changes and successfully link your Azure IoT Hub as the input source for your Stream Analytics job. +4. After entering these details, carefully verify them for accuracy and completeness. Click Save to apply the changes and successfully link your Azure IoT Hub as the input source for your Stream Analytics job. Your job is now configured to ingest streaming IoT telemetry data in real-time, preparing it for further analysis, storage, and visualization. @@ -64,7 +64,7 @@ Your job is now configured to ingest streaming IoT telemetry data in real-time, Now that you have configured your Azure IoT Hub as an input source, the next step is to create and run a Stream Analytics query. This query defines how incoming IoT data will be filtered, transformed, or routed for further processing. Follow these steps: 1. Navigate to your Stream Analytics job in the Azure Portal. 2. 
Under the Job topology menu on the left, select Query. -3. In the query editor, enter the following simple SQL-like query to stream all incoming data from your IoT device +3. In the query editor, enter the following simple SQL-like query to stream all incoming data from your IoT device: ```SQL SELECT deviceId, @@ -78,15 +78,12 @@ FROM This straightforward query selects all relevant fields (deviceId, temperature, pressure, humidity, and timestamp) directly from your configured input (IoTHubInput), which corresponds to the Azure IoT Hub you previously connected. -Before running this query, ensure your Python IoT simulator (iot_simulator.py) is actively sending telemetry data. After the simulator begins transmitting data, you can test and verify your Stream Analytics query within the Azure Portal using the built-in Test query feature. Doing this allows you to view live-streamed sensor data in real-time and confirm that your streaming pipeline is working as expected: +Before running this query, ensure your Python IoT simulator `iot_simulator.py` is actively sending telemetry data. After the simulator begins transmitting data, you can test and verify your Stream Analytics query within the Azure Portal using the built-in Test query feature. Doing this allows you to view live-streamed sensor data in real-time and confirm that your streaming pipeline is working as expected: -![img13 alt-text#center](Figures/13.png) +![img13 alt-text#center](figures/13.png) -Constructing queries in Azure Stream Analytics involves using a straightforward SQL-like syntax specifically optimized for real-time stream processing. Typically, a query contains a SELECT statement to specify which fields from the incoming telemetry data to process, and a FROM statement to indicate the source stream. Queries can be expanded with advanced features such as filtering, aggregations, and temporal window functions to handle more complex scenarios. +Constructing queries in Azure Stream Analytics involves using a straightforward SQL-like syntax specifically optimized for real-time stream processing. Typically, a query contains a `SELECT` statement to specify which fields from the incoming telemetry data to process, and a `FROM` statement to indicate the source stream. Queries can be expanded with advanced features such as filtering, aggregations, and temporal window functions to handle more complex scenarios. -In our current example, we implemented a simple query using SELECT and FROM clauses to view and forward all incoming sensor data from our IoT device without any additional filtering or transformation. This straightforward query effectively demonstrates how Azure Stream Analytics ingests and processes real-time IoT data, establishing a foundation upon which you can build more advanced and powerful data analytics workflows in the future +In the current example, you implemented a simple query using `SELECT` and `FROM` clauses to view and forward all incoming sensor data from your IoT device without any additional filtering or transformation. This straightforward query effectively demonstrates how Azure Stream Analytics ingests and processes real-time IoT data, establishing a foundation upon which you can build more advanced and powerful data analytics workflows in the future. -## Summary and Next Steps -In this section, you successfully set up an Azure Stream Analytics job to analyze and process real-time telemetry data streamed from your Python-based IoT simulator.
You configured Azure IoT Hub as the data input source for Stream Analytics, defined a SQL-like query to select sensor readings, and confirmed that data was streaming correctly from your simulated Arm64-powered device into Azure. - -You established a real-time analytics pipeline that integrates with Azure IoT Hub, enabling immediate analysis of sensor data as it arrives in the cloud. In the next step, you’ll build upon this foundation by defining an additional query within your Azure Stream Analytics job. This new query will direct processed sensor data into Azure Cosmos DB. By writing the streaming IoT data into Cosmos DB, you’ll securely persist sensor telemetry, making it readily available for long-term storage, efficient retrieval, further analysis, and integration into applications or dashboards. +You have established a real-time analytics pipeline that integrates with Azure IoT Hub, enabling immediate analysis of sensor data as it arrives in the cloud. In the next step, you will build upon this foundation by defining an additional query within your Azure Stream Analytics job. This new query will direct processed sensor data into Azure Cosmos DB. By writing the streaming IoT data into Cosmos DB, you will securely persist sensor telemetry, making it readily available for long-term storage, efficient retrieval, further analysis, and integration into applications or dashboards. diff --git a/content/learning-paths/iot/iot-sdk/aws.md b/content/learning-paths/iot/iot-sdk/aws.md index 7f1bf5b619..882bc9c1e6 100644 --- a/content/learning-paths/iot/iot-sdk/aws.md +++ b/content/learning-paths/iot/iot-sdk/aws.md @@ -168,7 +168,7 @@ ML_HEARD_ON ... ``` -![keyword #center](Images/keyword.png) +![keyword #center](images/keyword.png) ## Repeat with the speech example @@ -185,6 +185,6 @@ INFO - Complete recognition: turn down the temperature in the bedroom 134 14506 [ML_MQTT] [INFO] Publish to the MQTT topic /ml/inference. ``` -![speech #center](Images/speech.png) +![speech #center](images/speech.png) See the [README.md](https://github.com/ARM-software/open-iot-sdk/blob/main/v8m/README.md#setting-up-aws-connectivity) for full details. 
\ No newline at end of file diff --git a/content/learning-paths/iot/iot-sdk/Images/keyword.png b/content/learning-paths/iot/iot-sdk/images/keyword.png similarity index 100% rename from content/learning-paths/iot/iot-sdk/Images/keyword.png rename to content/learning-paths/iot/iot-sdk/images/keyword.png diff --git a/content/learning-paths/iot/iot-sdk/Images/speech.png b/content/learning-paths/iot/iot-sdk/images/speech.png similarity index 100% rename from content/learning-paths/iot/iot-sdk/Images/speech.png rename to content/learning-paths/iot/iot-sdk/images/speech.png diff --git a/content/learning-paths/laptops-and-desktops/_index.md b/content/learning-paths/laptops-and-desktops/_index.md index 4211895996..20a7e5952f 100644 --- a/content/learning-paths/laptops-and-desktops/_index.md +++ b/content/learning-paths/laptops-and-desktops/_index.md @@ -10,15 +10,15 @@ maintopic: true operatingsystems_filter: - Android: 2 - ChromeOS: 1 -- Linux: 30 +- Linux: 31 - macOS: 7 -- Windows: 40 +- Windows: 42 subjects_filter: -- CI-CD: 4 +- CI-CD: 5 - Containers and Virtualization: 5 - Migration to Arm: 27 -- ML: 1 -- Performance and Architecture: 24 +- ML: 2 +- Performance and Architecture: 25 subtitle: Create and migrate apps for power efficient performance title: Laptops and Desktops tools_software_languages_filter: @@ -26,21 +26,24 @@ tools_software_languages_filter: - Alacritty: 1 - Android Studio: 1 - Arm Development Studio: 1 +- Arm Performance Libraries: 2 - Arm64EC: 1 - assembly: 1 - C: 3 - C#: 6 -- C++: 5 +- C++: 6 - C/C++: 4 - CCA: 1 - Clang: 11 +- cmake: 1 - CMake: 2 - Coding: 16 - CSS: 1 - Daytona: 1 - Docker: 4 -- GCC: 9 -- GitHub: 2 +- GCC: 10 +- Git: 1 +- GitHub: 3 - GitLab: 1 - GoogleTest: 1 - HTML: 2 @@ -52,22 +55,23 @@ tools_software_languages_filter: - Linux: 1 - LLVM: 1 - llvm-mca: 1 +- MSBuild: 1 - MTE: 1 - Neon: 1 - Neovim: 1 - Node.js: 3 - OpenCV: 1 - perf: 4 -- Python: 3 +- Python: 4 - Qt: 2 - Remote.It: 1 - RME: 1 -- Runbook: 17 +- Runbook: 18 - Rust: 2 - SVE: 1 - SVE2: 1 - Trusted Firmware: 1 -- Visual Studio: 12 +- Visual Studio: 14 - Visual Studio Code: 9 - VS Code: 3 - Windows Forms: 1 diff --git a/content/learning-paths/laptops-and-desktops/electron/Figures/01.png b/content/learning-paths/laptops-and-desktops/electron/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/electron/Figures/01.png rename to content/learning-paths/laptops-and-desktops/electron/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/electron/Figures/02.png b/content/learning-paths/laptops-and-desktops/electron/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/electron/Figures/02.png rename to content/learning-paths/laptops-and-desktops/electron/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/electron/Figures/03.png b/content/learning-paths/laptops-and-desktops/electron/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/electron/Figures/03.png rename to content/learning-paths/laptops-and-desktops/electron/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/electron/Figures/04.png b/content/learning-paths/laptops-and-desktops/electron/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/electron/Figures/04.png rename to content/learning-paths/laptops-and-desktops/electron/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/electron/how-to-1.md 
b/content/learning-paths/laptops-and-desktops/electron/how-to-1.md index 53488e2390..2d8c0a1377 100644 --- a/content/learning-paths/laptops-and-desktops/electron/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/electron/how-to-1.md @@ -20,7 +20,7 @@ You can find the the complete code used in the learning path [here](https://gith ## Before you begin Before you begin, install Node.JS for Arm64. You can find the installer [here](https://nodejs.org/en/download). In this learning path, you will use version 20.10.0. The installation process is automatic. However, make sure to check the "Automatically install the necessary tools" checkbox so that it automatically installs the build tools for the NPM packages: -![fig1](Figures/01.png) +![fig1](figures/01.png) ## Initialize the project Start by initializing the project. To do this, open the command prompt or terminal and type the following commands: @@ -218,7 +218,7 @@ npm start The application window appears. Then click the Fetch data button and you will see the list of posts: -![fig2](Figures/02.png) +![fig2](figures/02.png) ## Styling an application The application works fine. However, it uses default styles and does not look very pretty. To change this you can use cascading style sheets (CSS) as per web applications. To style the application, proceed as follows: @@ -302,11 +302,11 @@ body { After the application launches, click the **Fetch data** button and you will see the following result: -![fig3](Figures/03.png) +![fig3](figures/03.png) ## Checkpoint You now have the application up and running. By default, it runs using the Arm64 architecture because we used Node.js for Arm64. To confirm this, open the Task Manager, click the Details tab and look for **electron.exe** processes: -![fig4](Figures/04.png) +![fig4](figures/04.png) In the next step, you will configure your application such that you can explicitly build it for x64 and Arm64 platforms. 
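If you would rather confirm the architecture programmatically than through Task Manager, the short sketch below prints the architecture reported by Node.js. This is an illustrative addition rather than part of the Learning Path project; `process.arch` and `os.arch()` are standard Node.js APIs and report `arm64` for an Arm64 build.

```typescript
// Illustrative sketch (hypothetical file name arch-check.ts): print the CPU
// architecture the current Node.js/Electron process was built for.
// An Arm64 build reports "arm64"; an x64 build running under emulation on
// Windows on Arm reports "x64".
import * as os from 'os';

console.log(`process.arch: ${process.arch}`);
console.log(`os.arch():    ${os.arch()}`);
```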
diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/actions.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/actions.png new file mode 100644 index 0000000000..3767458c42 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/actions.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/capacity.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/capacity.png new file mode 100644 index 0000000000..dfb0ff4e34 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/capacity.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/cpuinfo.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/cpuinfo.png new file mode 100644 index 0000000000..b3c03b9f53 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/cpuinfo.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/new-runner.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/new-runner.png new file mode 100644 index 0000000000..1972c972d4 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/new-runner.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/platform.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/platform.png new file mode 100644 index 0000000000..30ebb83bc9 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/platform.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/size.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/size.png new file mode 100644 index 0000000000..4aa60c26fb Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/size.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/win_image.png b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/win_image.png new file mode 100644 index 0000000000..16c3d85212 Binary files /dev/null and b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_images/win_image.png differ diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_index.md b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_index.md new file mode 100644 index 0000000000..139a27acf1 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_index.md @@ -0,0 +1,49 @@ +--- +title: Automate Windows on Arm Builds with GitHub Arm-hosted Runners + +minutes_to_complete: 20 + +who_is_this_for: This introductory tutorial is for software developers looking to automate Windows application builds on Arm architecture using GitHub Actions. + +learning_objectives: + - Describe GitHub Arm-hosted Windows runners. + - Configure workflows to run on Arm-hosted runners. + - Automate Windows application builds with GitHub Actions. + +prerequisites: + - A GitHub account. + - Familiarity with GitHub Actions. 
+ +author: + - Pareena Verma + +### Tags +skilllevels: Introductory +subjects: CI-CD +armips: + - Cortex-A +operatingsystems: + - Windows +tools_software_languages: + - GitHub + - Visual Studio + - MSBuild + - Arm Performance Libraries + +further_reading: + - resource: + title: GitHub Actions Partner Images Repository + link: https://github.com/actions/partner-runner-images/ + type: documentation + - resource: + title: GitHub Actions now supports Windows on Arm runners for all public repos + link: https://blogs.windows.com/windowsdeveloper/2025/04/14/github-actions-now-supports-windows-on-arm-runners-for-all-public-repos/ + type: blog + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 +layout: "learningpathall" +learning_path_main_page: "yes" +--- + diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/_next-steps.md b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_next-steps.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/_next-steps.md rename to content/learning-paths/laptops-and-desktops/gh-arm-runners-win/_next-steps.md diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/automate-win-app.md b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/automate-win-app.md new file mode 100644 index 0000000000..c984dd55f4 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/automate-win-app.md @@ -0,0 +1,84 @@ +--- +title: Automate the Build of Windows Applications + +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +{{% notice Learning Tip %}} +In this section, you will learn how to automate the build process of a Windows application using GitHub Arm-hosted runners. You will use the application in the [Optimize Windows applications using Arm Performance Libraries Learning Path](/learning-paths/laptops-and-desktops/windows_armpl/). +{{% /notice %}} + +### About the Windows Application + +This section provides a basic overview of the application. For detailed build instructions, see the [Optimize Windows applications using Arm Performance Libraries Learning Path](/learning-paths/laptops-and-desktops/windows_armpl/2-multithreading/). + +The source code for the rotating 3D cube application, which demonstrates multiple programming approaches to performing rotation calculations, is available in this GitHub repository. + +```console +https://github.com/arm/SpinTheCubeInGDI +``` + +The application implements a spinning cube and consists of four key components: +- **Shape Generation**: generates vertices for a sphere using a golden ratio-based algorithm. +- **Rotation Calculation**: uses a rotation matrix to rotate the 3D shape around the X, Y, and Z axes. +- **Drawing**: draws the transformed vertices of the shapes on the screen using a Windows API. +- **Performance Measurement**: measures and displays the number of transforms per second. + +The code has two options to calculate the rotation: + +1. **Multithreading**: distributes rotation calculations across multiple threads to improve performance. +2. **Arm Performance Libraries**: uses Arm-optimized math functions to accelerate rotation calculations. + +You will learn how to automate the build process for this application by using GitHub Actions with Arm-hosted Windows runners. 
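Before moving on to the workflow itself, the sketch below illustrates what the rotation step computes. The repository implements this in C++ (optionally accelerated with Arm Performance Libraries); the TypeScript here is only a language-neutral illustration of applying a Z-axis rotation matrix to a few sample vertices, not code from the project.

```typescript
// Illustration only: rotate 3D vertices about the Z axis by `angle` radians.
// The real application applies X, Y, and Z rotations to many vertices per
// frame, either across multiple threads or through Arm Performance Libraries.
type Vertex = [number, number, number];

function rotateZ([x, y, z]: Vertex, angle: number): Vertex {
  const c = Math.cos(angle);
  const s = Math.sin(angle);
  // Standard Z-axis rotation matrix applied to (x, y); z is unchanged.
  return [x * c - y * s, x * s + y * c, z];
}

const sampleVertices: Vertex[] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]];
console.log(sampleVertices.map((v) => rotateZ(v, Math.PI / 2)));
```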
+ +### Automate the Build Process + +The [GitHub Actions workflow `msbuild.yml`](https://github.com/arm/SpinTheCubeInGDI/blob/main/.github/workflows/msbuild.yml) that automates the build process using MSBuild for Windows on Arm is included in the repository. + +Below is an explanation of the steps in the workflow: + + + **Trigger Events**: the workflow runs when there is a push or pull request event on the main branch. + + **Job Definition**: a single job named `build` is defined. It runs on the GitHub Arm-hosted Windows runner (`windows-11-arm`) as shown: + +```console +jobs: + build: + runs-on: windows-11-arm +``` + **Checkout Repository**: uses the `actions/checkout@v4` action to fetch the code. + + **Add MSBuild to PATH**: adds MSBuild tools for the build process using `microsoft/setup-msbuild@v1.0.2`. + + **Restore Dependencies**: runs `nuget restore` to restore NuGet packages required by the solution. + + **Create Download Directory**: creates a directory to store downloaded files and verifies the Python version. + + **Download ARM Performance Libraries**: downloads the Windows installer for ARM Performance Libraries (APL) and verifies the downloaded files. + + **Install ARM Performance Libraries**: installs the downloaded ARM Performance Libraries using `msiexec.exe` with a quiet mode and logs the process. + + **Check Installation Success**: verifies the success of the APL installation by checking the exit code and logs. + + **Build the Solution**: runs MSBuild to build the solution with the specified configuration (Debug) and platform (ARM64). + + **Upload Build Artifact**: uploads the built executable as an artifact using `actions/upload-artifact@v4`. + +This workflow automates dependency management, environment setup, project compilation, and artifact storage - all using a GitHub Arm-hosted Windows runner. + +### Fork the Repository and Run the Workflow + +To run the workflow in your own GitHub account, start by forking the repository. + +To fork the repository, go to the repository page on GitHub and click the **Fork** button in the top right corner. This will create a copy of the repository under your own GitHub account. + +You can then run the workflow in your forked repository by navigating to the **Actions** tab and selecting the MSBuild workflow, then clicking **Run workflow**. + +You can view the **Actions** logs in the repository for each step. +![action #center](_images/actions.png) + +You have learned how to build a Windows application and upload the result as an artifact of your workflow using the GitHub Arm-hosted Windows runner. diff --git a/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/introduction.md b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/introduction.md new file mode 100644 index 0000000000..8b1a2d8352 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/gh-arm-runners-win/introduction.md @@ -0,0 +1,84 @@ +--- +title: Introduction to GitHub Arm-hosted Runners + +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- +### Overview + +GitHub now supports Arm-hosted Windows runners, making it easy to run workflows on Arm hardware without managing infrastructure yourself. This Learning Path introduces what they are, how to use them, and how to configure your own larger runner. + +### What are GitHub Arm-hosted Runners? + +Runners are the machines that execute jobs in a GitHub Actions workflow. 
An Arm-hosted runner is managed by GitHub and uses the Arm architecture, meaning you don't need to provide a server to run Actions workflows. GitHub provides the system and runs the Action workflows for you. + +GitHub Arm-hosted runners for Windows are now available in public preview, providing a powerful and efficient way to build, test, and deploy your Windows applications on Arm without the need for virtualization. These runners come with a Windows 11 Desktop image, equipped with many tools to get started with your workflows. + +Arm-hosted runners are available for public repositories at no cost, subject to [standard usage limits](https://docs.github.com/en/actions/administering-github-actions/usage-limits-billing-and-administration#usage-limits). + +### How Do I Use GitHub Arm-hosted Windows Runners? + +To leverage the GitHub Windows Arm64 hosted runners, you need to use the following label in your public repository workflow runs: + +```yaml +runs-on: windows-11-arm +``` + +{{% notice Note %}} +This label does not work in private repositories - the workflow will fail if you use it. +{{% /notice %}} + + +### What Types of GitHub-hosted Runners Are Available? + +GitHub offers two types of hosted runners: standard and larger runners. Larger runners give you more control—you can configure the amount of RAM, number of CPUs, and disk space. You can also use the Windows 11 Desktop Arm Image with larger runners. To use this image on larger runners, you will need to create a new runner. + +### How Do I Create a Larger GitHub Arm-hosted Windows Runner? + +Arm-hosted runners are created at the organization level. + +Navigate to your organization and select the **Settings** tab. On the left pane, select **Actions->Runners**. + +On the **Runners** page, select the **New runner** drop-down on the top right, and then select **New GitHub-hosted runner**. + +![new-runner #center](_images/new-runner.png) + +Specify a name for the runner. This name is used in the `runs-on` field in your workflows, so make it clear for others who use it. + +Choose Windows ARM64 for the platform and click **Save**. + +![platform #center](_images/platform.png) + +Specify the operating system image for the runner, select **Microsoft Windows 11 Desktop by Arm Limited**, and click **Save**. + +![image #center](_images/win_image.png) + +Select the size of the larger runner you need and click **Save**. + +![specifications #center](_images/size.png) + +The **Capacity** section includes the maximum concurrency, which is the number of jobs to run at the same time. + +You can also set the runner group for this runner. The runner group controls this runner’s settings. You may need to return to the group if configuration changes are required. + +![capacity #center](_images/capacity.png) + +Finally, click **Create runner**. + +Your new Arm-hosted Windows larger runner is ready to use. Remember the runner name so you can use it in your workflows with the `runs-on` label. + +### How Do I Check the Server Hardware Used by the Arm-hosted Windows Runners? + +The Arm-hosted runners are powered by Microsoft Azure Cobalt 100 processors, based on the Arm Neoverse N2, with 4 vCPUs and Armv9-A features, including Scalable Vector Extension 2 (SVE2). + +The following output shows the result of running the `wmic` command on the Arm-hosted runner. + +![Arm-hosted runner info #center](_images/cpuinfo.png) + + +### How Can I Find the Software Installed on the Arm-hosted Windows Runners? 
+ +To find out more about the software installed on the Arm-hosted runners, visit the [GitHub Actions Partner Images repository](https://github.com/actions/partner-runner-images/). Check the [image documentation](https://github.com/actions/partner-runner-images/blob/main/images/arm-windows-11-image.md) for a list of software packages installed on the Windows 11 by Arm Limited image. You can also use this repository to report issues or request additional software. + diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/create-self-hosted-runner-github.md b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/create-self-hosted-runner-github.md index ac41ab2bc5..fc85d4001f 100644 --- a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/create-self-hosted-runner-github.md +++ b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/create-self-hosted-runner-github.md @@ -11,13 +11,13 @@ layout: "learningpathall" ## Create the Self-Hosted Runner in GitHub Go to the Actions tab, and under Management, click on Runners. If you do not see the Actions tab, navigate to Settings > Actions and select Allow all actions and reusable workflows. This will display the following window: -![img8](Figures/08.png) +![img8](figures/08.png) Then, click on the 'New runner' button, followed by 'New self-hosted runner'. In the 'Add new self-hosted runner' section, proceed as follows: * Select Linux for the operating system. * Choose ARM64 for the architecture -![img9](Figures/09.png) +![img9](figures/09.png) Next, execute the following instructions on your machine: @@ -100,6 +100,6 @@ Current runner version: '2.314.1' The runner will now be visible in the GitHub actions: -![img10](Figures/10.png) +![img10](figures/10.png) You have successfully prepared all the tools needed to run the CI/CD pipeline on the self-hosted runner. In the next step, you will test it. 
\ No newline at end of file diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/01.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/01.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/02.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/02.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/03.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/03.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/04.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/04.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/05.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/05.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/06.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/06.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/07.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/07.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/08.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/08.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/09.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/09.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/09.png diff 
--git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/10.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/10.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/10.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/10.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/11.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/11.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/11.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/11.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/12.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/12.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/12.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/12.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/13.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/13.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/13.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/13.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/14.png b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/14.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/Figures/14.png rename to content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/figures/14.png diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/how-to-2.md b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/how-to-2.md index 85be3f78f1..19899b926c 100644 --- a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/how-to-2.md @@ -100,21 +100,21 @@ To run the above pipeline, you need to make modifications to the source code and This change updates the page's main header to greet Arm developers specifically. -![img11](Figures/11.png) +![img11](figures/11.png) 3. Below the file editor, find and click the Commit changes button. This action opens the Commit changes window. 4. In the Commit changes window, you have the option to add a commit message describing your changes. After reviewing your message, click Commit changes to finalize the update: -![img12](Figures/12.png) +![img12](figures/12.png) Committing these changes to the main branch will automatically trigger the CI/CD pipeline. You can monitor the progress of this pipeline under the Actions tab of your GitHub repository: -![img13](Figures/13.png) +![img13](figures/13.png) Upon completion of the second job, which builds and pushes the Docker image, you can find the updated Docker image in your Docker Hub repository. 
The new image will be tagged as latest and will reflect the changes made to the index.cshtml file: -![img14](Figures/14.png) +![img14](figures/14.png) This process demonstrates how code changes in your repository can seamlessly integrate with GitHub Actions to automate the build and deployment of your application, including updating Docker images in Docker Hub. diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/prepare-github-repo.md b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/prepare-github-repo.md index d0ad97fbb8..424d5eab49 100644 --- a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/prepare-github-repo.md +++ b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/prepare-github-repo.md @@ -14,7 +14,7 @@ Now, you will prepare your GitHub repository as follows: 1. Go to GitHub and log in with your account credentials. 2. In the top right corner, click the + dropdown and select Import repository. -![img5](Figures/05.png) +![img5](figures/05.png) 3. In the Import your project to GitHub section: * Paste the following link https://github.com/dawidborycki/arm-lp-ci-cd-net.git into the 'Your old repository's clone URL' field. @@ -23,14 +23,14 @@ Now, you will prepare your GitHub repository as follows: After the import is complete, you will create two secrets that will store your Docker username and token. Proceed as follows 1. Go to your repository settings, and on the left, scroll down to Secrets and variables, then click Actions: -![img6](Figures/06.png) +![img6](figures/06.png) 2. Under Actions secrets and variables, click the New repository secret button. This opens the New secret window, where you should configure the secret as follows: * Name: DOCKER_USERNAME * Value: * Click the Add secret button -![img7](Figures/07.png) +![img7](figures/07.png) 3. Similarly, create another secret: * Name: DOCKER_PASSWORD diff --git a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/set-up-docker-repo.md b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/set-up-docker-repo.md index 24c7764ac2..cdddbf8c5a 100644 --- a/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/set-up-docker-repo.md +++ b/content/learning-paths/laptops-and-desktops/self_hosted_cicd_github/set-up-docker-repo.md @@ -11,21 +11,21 @@ First, create a repository on DockerHub by following these steps: 1. Log in to DockerHub, then click on *Repositories* at the top. 2. Under *Repositories*, click the *Create repository* button: -![img1](Figures/01.png) +![img1](figures/01.png) 3. In the *Create repository* window, set the repository name to sampleapp and change the repository visibility to *Private*. -![img2](Figures/02.png) +![img2](figures/02.png) 4. Click the *Create* button. This will create the repository and redirect you back to the repositories list. Note the Docker push command. It should look like: docker push /sampleapp:tagname -![img3](Figures/03.png) +![img3](figures/03.png) Now you will need to generate the access token to enable the workflow to authenticate to DockerHub. To create the access token: 1. Click on your username in the top-right corner of the Docker Hub site. 2. From the dropdown menu, select My Account, and then click the Security tab on the left: -![img4](Figures/04.png) +![img4](figures/04.png) 3. Click the New Access Token button. This opens the New Access Token window, in which: * Type ci-cd for the token name. 
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_aws_iot/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot/figures/09.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-2.md index b58a1294a5..4605f8157a 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-2.md @@ -12,15 +12,15 @@ In this section, you connect the emulator to AWS IoT Core. 
## Connecting the emulator Log in to the AWS Console at https://aws.amazon.com, and then search for IoT. From the list that appears, select **IoT Core**: -![fig1](Figures/01.png) +![fig1](figures/01.png) In the AWS IoT console, click **Connect one device**: -![fig2](Figures/02.png) +![fig2](figures/02.png) This will open the **Connect one device** wizard: -![fig3](Figures/03.png) +![fig3](figures/03.png) In the first step of this wizard, **Register and secure your device**, you can see the ping command, which you can use to ensure you can connect to AWS services. @@ -44,17 +44,17 @@ Note that the **a6t44uzbanvsz-ats.iot.eu-central-1.amazonaws.com** is your endpo Click the **Next** button. This takes you to the **Register and secure your device** step, in which you type **WeatherEmulator** under the **Thing** name: -![fig4](Figures/04.png) +![fig4](figures/04.png) Then, click the **Next** button, which opens the **Choose platform and SDK** window, in which you select: * **Windows** from the **Device platform operating system**. * **Node.js** from the **AWS IoT Device SDK**. -![fig5](Figures/05.png) +![fig5](figures/05.png) Afterwards, click the **Next** button, which takes you to the **Download connection kit** step, in which you click the **Download connection kit** button to get the zip with certificates and the sample code, which sends data to the AWS cloud: -![fig6](Figures/06.png) +![fig6](figures/06.png) After downloading the kit, click the **Next** button. This takes you to the **Run connection kit**, in which you click the **Continue** button. This takes you back to the AWS IoT screen. @@ -83,11 +83,11 @@ Before you can connect the device to the AWS IoT Core, you need to configure the To update the policy, under the AWS IoT console, click **Security** -> **Policies**: -![fig7](Figures/07.png) +![fig7](figures/07.png) There should be one policy, **WeatherEmulator-Policy**. Click that policy. This opens another view, in which you can see the policy details. In that view, click **Edit active version** button, which opens the Edit policy view: -![fig7](Figures/08.png) +![fig7](figures/08.png) Modify the first entry ('iot:Publish,iot:Receive,iot:PublishRetain') such that you replace the 'topic/sdk/test/js' with 'topic/*'. This will enable the application to publish and receive messages from any topic. Similarly, modify the second entry, and replace 'topicfilter/sdk/test/js' with 'topicfilter/*'. diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-3.md b/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-3.md index 6af29c568a..53ef18ebb9 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-3.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot/how-to-3.md @@ -16,5 +16,5 @@ Under AWS IoT Core, click on **MQTT test client** under the **Test** menu. In the 'Topic Filter' field, type 'Emulators/Weather/SensorReadings', and click the **Subscribe** button. 
You will see the data sent by the emulator displayed: -![fig9](Figures/09.png) +![fig9](figures/09.png) diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/how-to-1.md index 6bc519d6b6..266324449f 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_dynamodb/how-to-1.md @@ -13,11 +13,11 @@ Start by creating the rule: 
1. Open AWS IoT Core, and look for *Rules* under *Message routing*: -![fig1](Figures/01.png) +![fig1](figures/01.png) 2. Click the *Create rule* button. This opens the *Create rule* view, in which you configure the rule: -![fig2](Figures/02.png) +![fig2](figures/02.png) 3. Use the *Create rule* view to configure the rule as follows. Firstly, under the rule name, type: send_message_to_dynamodb. Then, click the *Next* button. This will take you to the Configure SQL statement, where you use the following SQL query: @@ -25,25 +25,25 @@ Start by creating the rule: SELECT * FROM 'Emulators/Weather/SensorReadings' ``` -![fig3](Figures/03.png) +![fig3](figures/03.png) 4. Click the *Next* button. -![fig4](Figures/04.png) +![fig4](figures/04.png) 5. This opens the *Attach rule actions*, where under *Action 1*, select *DynamoDBv2*. This activates additional controls (see figure below): -![fig5](Figures/05.png) +![fig5](figures/05.png) 6. Click the *Create DynamoDB table* button. This will open another tab, where you type *SensorReadings* for the Table name and *timestamp* for the partition key. Make sure to check *Default settings*, scroll down, and click the *Create table* button. Wait a few moments for the table to be created. Then, return to the *Create rule* view. -![fig6](Figures/06.png) +![fig6](figures/06.png) 7. In the *Create rule* view, click the *Refresh* button next to the *Choose DynamoDB table name* drop-down. Then, select the *SensorReadings* table from that drop-down. Afterward, click the *Create new role* button (located below the IAM role). This activates the *Create role* pop-up. 8. In the *Create role* pop-up window, type 'weather_station_dynamo_db', and click the *Create* button. By now, the *Attach rule actions* screen looks like this: -![fig7](Figures/07.png) +![fig7](figures/07.png) 9. Scroll down and click the *Next* button. This displays the summary screen, where you click the *Create* button. @@ -66,7 +66,7 @@ This starts the emulator, which streams data to the AWS IoT Core. The output of To ensure this data is written to the DynamoDB table, go to the AWS console, and in the search box type 'DynamoDB'. Select *DynamoDB* from the list. This takes you to the DynamoDB dashboard, where you click *Explore items* on the left. Then, check the *SensorReadings* radio button, and you see the items in the table: -![fig8](Figures/08.png) +![fig8](figures/08.png) Note that the values in the table are the same as those generated by the emulator. This ensures that you have successfully integrated AWS IoT Core with DynamoDB. diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/create-rule.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/create-rule.md index 8304f1c3de..0bb4a84cc7 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/create-rule.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/create-rule.md @@ -14,11 +14,11 @@ Start by creating the rule: 1. Open AWS IoT Core, and look for *Rules* under *Message routing*: -![fig1](Figures/01.png) +![fig1](figures/01.png) 2. Click the *Create rule* button. This opens the *Create rule* view, in which you configure the rule: -![fig2](Figures/02.png) +![fig2](figures/02.png) 3. Use the *Create rule* view to configure the rule. Firstly, under the rule name, type: **check_temperature**. Then, click the *Next* button.
This takes you to the Configure SQL statement, where you use the following SQL query: @@ -32,29 +32,29 @@ This query ensures that only temperature readings are sent to the AWS Lambda fun 5. This opens the *Attach rule actions*, where under *Action 1*, select *Lambda*. This activates additional controls (see figure below): -![fig3](Figures/03.png) +![fig3](figures/03.png) 6. Click the *Create a Lambda function* button. This opens another tab. Select *Author from scratch*, type **SendNotification** for the function name, select *Node.js 20.x* for the runtime, and *arm64* for the architecture: -![fig4](Figures/04.png) +![fig4](figures/04.png) 7. Scroll down, and under the *Execution role*, select *Create a new role from AWS policy templates*. This activates additional controls, where you type **sns-email-role** for the *Role name*. Then, click the drop-down list *Policy templates*, and select *Amazon SNS publish policy*: -![fig5](Figures/05.png) +![fig5](figures/05.png) 8. Click the *Create function* button. This takes you to the AWS Lambda Console, which looks as shown below: -![fig6](Figures/06.png) +![fig6](figures/06.png) 9. Keep the AWS Lambda Console open in a separate tab and go back to the *Create rule* wizard. Select *SendNotification* from the Lambda function dropdown, and ensure that the Lambda function version points to $LATEST: -![fig7](Figures/07.png) +![fig7](figures/07.png) 10. Click the *Next* button, and then the *Create* button. 11. Go back to AWS Lambda Console and refresh the page. You will now see that your function accepts the AWS IoT trigger: -![fig8](Figures/08.png) +![fig8](figures/08.png) This means that the AWS Lambda function you have just created has an input trigger set to AWS IoT. Specifically, the AWS Lambda function is invoked whenever a new message is sent to the 'Emulators/Weather/SensorReadings' topic. 
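For orientation, the sketch below shows the general shape of a handler invoked by such a rule: the IoT rule action delivers the published JSON payload as the `event` argument. The field name `temperature` and the threshold of 35 are assumptions based on the emulator used in this Learning Path, the SNS publishing step is omitted, and this is not the `SendNotification` function built in the following sections.

```typescript
// Hypothetical sketch of an IoT-triggered Lambda handler (Node.js 20.x, arm64).
// The IoT rule action delivers the published JSON message as `event`.
export const handler = async (event: { temperature?: number }) => {
  const threshold = 35; // assumed alert threshold, matching this Learning Path
  if (typeof event.temperature === 'number' && event.temperature > threshold) {
    // This is where the Learning Path later publishes an SNS notification.
    console.log(`High temperature reading: ${event.temperature}`);
  }
  return { statusCode: 200 };
};
```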
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/09.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/10.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/10.png similarity index 100% rename from 
content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/10.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/10.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/11.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/11.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/11.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/11.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/12.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/12.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/12.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/12.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/13.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/13.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/13.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/13.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/14.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/14.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/14.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/14.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/15.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/15.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/15.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/15.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/16.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/16.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/16.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/16.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/17.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/17.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/17.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/17.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/18.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/18.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/18.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/18.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/19.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/19.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/19.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/19.png diff --git 
a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/20.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/20.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/20.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/20.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/21.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/21.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/21.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/21.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/22.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/22.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/22.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/22.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/23.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/23.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/23.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/23.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/24.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/24.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/Figures/24.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/figures/24.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/implement-lambda.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/implement-lambda.md index 55804b65a2..aee054e155 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/implement-lambda.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda/implement-lambda.md @@ -171,7 +171,7 @@ In summary, this Lambda function creates an SNS topic and subscribes an email ad After making these changes, the Lambda code editor looks as shown below: -![fig9](Figures/09.png) +![fig9](figures/09.png) Now, click the Deploy button to apply code changes. @@ -180,7 +180,7 @@ Now, click the Deploy button to apply code changes. ### Test Events You will now create two test events to manually invoke the Lambda function. Click the Test button. This opens the Configure test event window: -![fig10](Figures/10.png) +![fig10](figures/10.png) In this window, type **temperature-normal-level** for the Event name, and then under Event JSON paste the following payload: @@ -194,7 +194,7 @@ Finally, click the *Save* button. This takes you back to the Lambda function con Click the triangle icon, in the Test button. 
This expands the menu, from which you select *Configure test event*: -![fig11](Figures/11.png) +![fig11](figures/11.png) This opens the *Configure test event* window, where you select *Create new event*, change the event name to **temperature-high-level**, and paste the following payload for the Event JSON: @@ -206,18 +206,18 @@ This opens the *Configure test event* window, where you select *Create new event The *Configure test event* window should looks as shown: -![fig12](Figures/12.png) +![fig12](figures/12.png) Click the *Save* button to add a new test event. This takes you back to the Lambda function console. ### Testing the function In the Lambda function console, click the triangle icon next to the Test button, and select *temperature-normal-level* event: -![fig13](Figures/13.png) +![fig13](figures/13.png) Then, click the *Test* button. The Lambda function is invoked, and you see the following execution result: -![fig14](Figures/14.png) +![fig14](figures/14.png) This means that our Lambda function does not have the necessary permissions to create the SNS topic. @@ -231,40 +231,40 @@ Specifically, you will attach the policy giving the Lambda a full access to the To modify the role, proceed as shown: 1. Go the AWS console, and type **IAM** in the search box. Then, select IAM from the list: -![fig15](Figures/15.png) +![fig15](figures/15.png) 2. This takes you to the IAM Dashboard, where you click *Roles under Access management*: -![fig16](Figures/16.png) +![fig16](figures/16.png) 3. Click the *sns-email-role*. This opens another screen, which should look like shown below: -![fig17](Figures/17.png) +![fig17](figures/17.png) 4. Click the *Add permissions* button. This activates the drop-down list, from which you select *Attach policies*. 5. In the *Attach policy to sns-email-role* view, type *SNSFull* in the search box. This filters the list of policies to display one item: *AmazonSNSFullAccess*. Check the check-box on the left of the policy name, and then click the *Add permissions* button: -![fig18](Figures/18.png) +![fig18](figures/18.png) The role has been updated. You can now go back to AWS Lambda. ### Testing the function After updating the role, the SendNotification Lambda function can create SNS topics and send emails. To test this, in the Lambda function console, select the *temperature-high-level* test event, and click the Test button. You will see that the test event was processed without an error: -![fig19](Figures/19.png) +![fig19](figures/19.png) Go to your mailbox and look for an email from AWS Notification. The first email asks you to confirm the subscription: -![fig20](Figures/20.png) +![fig20](figures/20.png) Click the *Confirm subscription* link. Then, invoke the *temperature-high-level* event one more time to see that you received an alert email. It will appear like this: -![fig21](Figures/21.png) +![fig21](figures/21.png) Now, you can start the weather station emulator you created in this [Learning Path](/learning-paths/laptops-and-desktops/win_aws_iot/). Observe the values generated by the emulator. When the temperature exceeds the threshold of 35, you will receive an email notification: -![fig22](Figures/22.png) +![fig22](figures/22.png) ## Environment Variables In the above code, you hardcoded an email, SNS topic, and temperature threshold. This means that every time those values change, you need to modify the function code and redeploy it. In practice, it is better to use environment variables. 
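As a rough illustration of that refactoring (not the exact function used in this Learning Path), a Node.js handler can read the values through `process.env` and pass them to the AWS SDK for JavaScript v3 SNS client. The `EMAIL` and `TEMPERATURE_THRESHOLD` variable names come from the steps below; the topic name `temperature-alerts` and the `event.temperature` field are assumptions made only for this sketch.

```javascript
// Hypothetical sketch: configuration comes from environment variables
// (EMAIL, TEMPERATURE_THRESHOLD) instead of being hardcoded in the handler.
import { SNSClient, CreateTopicCommand, SubscribeCommand, PublishCommand } from "@aws-sdk/client-sns";

const sns = new SNSClient({});
const email = process.env.EMAIL;
const threshold = Number(process.env.TEMPERATURE_THRESHOLD ?? "35");

export const handler = async (event) => {
  // CreateTopic returns the existing topic if it was already created ("temperature-alerts" is an assumed name).
  const { TopicArn } = await sns.send(new CreateTopicCommand({ Name: "temperature-alerts" }));
  await sns.send(new SubscribeCommand({ TopicArn, Protocol: "email", Endpoint: email }));

  if (event.temperature > threshold) {
    await sns.send(new PublishCommand({
      TopicArn,
      Subject: "Temperature alert",
      Message: `Temperature ${event.temperature} exceeded the threshold of ${threshold}.`,
    }));
  }
  return { statusCode: 200 };
};
```

With this shape, changing the recipient or the threshold only requires editing the environment variables in the Lambda console, not redeploying code.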
@@ -274,11 +274,11 @@ You can modify the function code to use environment variables. First, you need t Proceed as shown: 1. In the Lambda function dashboard, click the *Configuration* tab, and select *Environment variables*: -![fig23](Figures/23.png) +![fig23](figures/23.png) 2. Click the *Edit* button. This opens the *Edit environment variables* view, where you click *Add environment variable*. This activates additional controls, where you type **EMAIL** for *Key* and your email for *Value*. Click the *Add environment variable* button one more time, and add another variable with **TEMPERATURE_THRESHOLD** as *Key*, and **35** as *Value*: -![fig24](Figures/24.png) +![fig24](figures/24.png) 3. Click the *Save* button. diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/create-lambda.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/create-lambda.md index e41659aad4..db4218db18 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/create-lambda.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/create-lambda.md @@ -13,7 +13,7 @@ You will now implement the AWS Lambda function to scan the records from the Dyna Go the AWS Lambda console. Then, create the function as shown below: 1. Click the *Create function* button: -![fig1](Figures/01.png) +![fig1](figures/01.png) 2. This opens a *Create function* wizard, in which you: * Select **Author from scratch**. @@ -23,19 +23,19 @@ Go the AWS Lambda console. Then, create the function as shown below: At this point your wizard should look as per the image below: -![fig2](Figures/02.png) +![fig2](figures/02.png) Next, in the *Create function* wizard expand *Change default execution role*, and proceed as follows: 1. Select **Create a new role from AWS policy templates**. 2. Type **role-lambda-to-dynamodb** under the Role name. 3. Under *Policy templates - optional* select **Simple microservice permissions**. -![fig3](Figures/03.png) +![fig3](figures/03.png) This ensures that your AWS Lambda function has all the necessary permissions to access the items in the DynamoDB table. Finally, scroll down and click the **Create function** button. This will take you to the GetAverageTemperature function dashboard: -![fig4](Figures/04.png) +![fig4](figures/04.png) In the next step, you will use this dashboard to modify the function code. 
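Before moving on, it can help to see the shape of the code that dashboard will hold. The sketch below is not the exact implementation used in this Learning Path: the table name `WeatherData` and the `timestamp` and `temperature` attribute names are assumptions, but it shows the AWS SDK for JavaScript v3 pattern of a DynamoDB `ScanCommand` with a `FilterExpression` that keeps only records from the last N minutes and averages them.

```javascript
// Hypothetical sketch of a scan-and-average handler (not the Learning Path's exact code).
import { DynamoDBClient } from "@aws-sdk/client-dynamodb";
import { DynamoDBDocumentClient, ScanCommand } from "@aws-sdk/lib-dynamodb";

const ddb = DynamoDBDocumentClient.from(new DynamoDBClient({}));
const MINUTES = 10; // look back over the last N minutes

export const handler = async () => {
  const cutoff = Date.now() - MINUTES * 60 * 1000; // assumes epoch-millisecond timestamps

  // Scan with a FilterExpression so only recent records are returned.
  const { Items = [] } = await ddb.send(new ScanCommand({
    TableName: "WeatherData",                        // assumed table name
    FilterExpression: "#ts >= :cutoff",
    ExpressionAttributeNames: { "#ts": "timestamp" },
    ExpressionAttributeValues: { ":cutoff": cutoff },
  }));

  const average = Items.length
    ? Items.reduce((sum, item) => sum + item.temperature, 0) / Items.length
    : null;

  return { statusCode: 200, body: JSON.stringify({ averageTemperature: average }) };
};
```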
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/testing.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/testing.md index ca249787c9..468e032a2f 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/testing.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_lambda_dynamodb/testing.md @@ -12,11 +12,11 @@ To test the function, launch the IoT emulator to write some data to the DynamoDB In the `GetAverageTemperature` function dashboard click the **Deploy** button: -![fig5](Figures/05.png) +![fig5](figures/05.png) You can then click the **Test** button. This will open the *Configure test event* window, in which you type **Test** in the Event name and click the **Save** button. This will take you back to the `GetAverageTemperature` function dashboard, where you click the **Test** button again. 
You will see the execution status, including the average temperature value as shown below: -![fig6](Figures/06.png) +![fig6](figures/06.png) # Summary In this learning path you have implemented an AWS Lambda function that processes data stored in a DynamoDB table. You utilized AWS SDK for JavaScript v3 to interact with DynamoDB. Specifically, to create a DynamoDB Query, you constructed a DynamoDB Scan Command with a FilterExpression to retrieve records from the last N minutes. diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md index 2e89fef453..06a7e14c2e 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/add-lambda.md @@ -13,7 +13,7 @@ You will now use the AWS Lambda console to retrieve the AWS Lambda endpoint for 1. Go to the AWS Lambda console, and click the `GetAverageTemperature` Lambda function. 2. In the Lambda function dashboard, click the **Configuration** tab and then the **Function URL** as shown below: -![fig2](Figures/02.png) +![fig2](figures/02.png) 3. Under the Function URL, click the **Create Function URL** button. 4. In the window that appears select NONE, scroll down to Additional Settings, and check **Configure cross-origin resource sharing (CORS)**. @@ -21,7 +21,7 @@ You will now use the AWS Lambda console to retrieve the AWS Lambda endpoint for The function URL will appear as follows: -![fig3](Figures/03.png) +![fig3](figures/03.png) Copy the link, and use it to replace the YOUR_API_GATEWAY_ENDPOINT_URL placeholder in the **index.js** file as follows: @@ -42,5 +42,5 @@ document.getElementById('fetchTemperatureButton').addEventListener('click', func Save the file, and open **index.html**. Then, click the **Get temperature button** and the average temperature will appear as shown below: -![fig4](Figures/04.png) +![fig4](figures/04.png) diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md index 17df6ef5bf..4089ffbb17 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/deploy.md @@ -85,7 +85,7 @@ aws s3 website s3:/// --index-document index.html Finally, access the website by typing **http://.s3-website-.amazonaws.com**, where is the name of your bucket and stands for the region you're using (here that is eu-central-1): -![fig5](Figures/05.png) +![fig5](figures/05.png) ## Summary In this learning path you have learned how to create a simple static website that fetches and displays temperature data from an AWS Lambda function. The website consists of an HTML file (**index.html**), a CSS file (**styles.css**), and a JavaScript file (**index.js**). The JavaScript code within the website sends an HTTP request to an AWS Lambda function, which processes the request and returns the temperature data in a JSON format. This data is then displayed on the webpage. 
diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_aws_iot_s3/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_aws_iot_s3/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md index 3016309f49..778fe3b466 100644 --- a/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md +++ b/content/learning-paths/laptops-and-desktops/win_aws_iot_s3/static-website.md @@ -139,6 +139,6 @@ document.getElementById('fetchTemperatureButton').addEventListener('click', func After saving all the files, open the **index.html** file in a web browser. It should render as follows: -![fig1](Figures/01.png) +![fig1](figures/01.png) The website is now ready. However, to fetch temperatures, you will need to add the Lambda endpoint. You will configure this in the next section. 
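For reference, the pattern that **index.js** follows is a plain `fetch()` call wired to the button's click handler. The sketch below is illustrative rather than the Learning Path's exact code: the `fetchTemperatureButton` ID and the `YOUR_API_GATEWAY_ENDPOINT_URL` placeholder appear elsewhere in this Learning Path, while the `temperatureDisplay` element and the `averageTemperature` response field are assumptions.

```javascript
// Illustrative sketch of the index.js click handler (element IDs and the
// averageTemperature field name are assumptions for this example).
const LAMBDA_URL = "YOUR_API_GATEWAY_ENDPOINT_URL"; // replaced with the Function URL later

document.getElementById("fetchTemperatureButton").addEventListener("click", async () => {
  const display = document.getElementById("temperatureDisplay"); // assumed element ID
  try {
    const response = await fetch(LAMBDA_URL);
    if (!response.ok) {
      throw new Error(`Request failed with status ${response.status}`);
    }
    const data = await response.json();
    display.textContent = `Average temperature: ${data.averageTemperature}`;
  } catch (err) {
    display.textContent = `Error: ${err.message}`;
  }
});
```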
diff --git a/content/learning-paths/laptops-and-desktops/win_cef/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_cef/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_cef/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_cef/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_cef/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_cef/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_cef/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_cef/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_cef/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_cef/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_cef/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_cef/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_cef/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_cef/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_cef/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_cef/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_cef/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_cef/how-to-1.md index 47156388f9..9850222ee7 100644 --- a/content/learning-paths/laptops-and-desktops/win_cef/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_cef/how-to-1.md @@ -15,7 +15,7 @@ In this learning path, you will learn how to create a desktop application for Wi Before you begin, install the following: 1. CMake for Arm64 [link](https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-windows-arm64.msi). During an installation check **Add CMake to the system PATH for the current user** as shown below -![fig1](Figures/01.png) +![fig1](figures/01.png) 2. Visual Studio 2022 with Desktop development with C++ workload @@ -103,7 +103,7 @@ Let's run the `cefsimple` application. The binaries can be found under the `buil By default, the application renders the Google homepage: -![fig2](Figures/02.png) +![fig2](figures/02.png) ## Modify the application You will now modify the `cefsimple` application to render a custom page. This page will fetch data from `JSONPlaceholder` and display it in a table. @@ -221,7 +221,7 @@ You will now create a custom index.html file to fetch the data from the JSONPlac Re-run the application and you will see the following: -![fig3](Figures/03.png) +![fig3](figures/03.png) This example demonstrates the versatility and power of CEF applications. One of their key strengths is the ability to modify the rendered content dynamically. This means you can change what's displayed in the application, like updating the HTML or CSS, without having to rebuild the entire application each time. This feature significantly streamlines the development process, making it quicker and more efficient. @@ -296,7 +296,7 @@ button:hover { After re-running the application you will see the following: -![fig4](Figures/04.png) +![fig4](figures/04.png) ## Summary CEF is a powerful tool for integrating web browser functionalities into desktop applications, utilizing modern web technologies. You have learned how to setup a project, create a basic application, and use CMake for CEF projects. 
Additionally, you have explored more advanced features like fetching and displaying data from JSONPlaceholder, adding and styling HTML elements like tables and buttons through separate CSS files all on your Windows on Arm machine. diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/09.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/10.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/10.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/10.png rename to 
content/learning-paths/laptops-and-desktops/win_forms/figures/10.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/11.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/11.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/11.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/11.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/12.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/12.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/12.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/12.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/Figures/13.png b/content/learning-paths/laptops-and-desktops/win_forms/figures/13.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_forms/Figures/13.png rename to content/learning-paths/laptops-and-desktops/win_forms/figures/13.png diff --git a/content/learning-paths/laptops-and-desktops/win_forms/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_forms/how-to-1.md index 60890595ca..1363a4d065 100644 --- a/content/learning-paths/laptops-and-desktops/win_forms/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_forms/how-to-1.md @@ -21,11 +21,11 @@ Before you begin, install Visual Studio 2022 with .NET Desktop Development workl ## Create the project Start by creating the project. Open Visual Studio and then click 'Create a new project'. -![fig1](Figures/01.png) +![fig1](figures/01.png) In the 'Create a new project' window, select the 'Windows Forms App' template. -![fig2](Figures/02.png) +![fig2](figures/02.png) This will open the 'Configure your new project' view, in which you should configure the project as follows (refer to the figure below): @@ -34,11 +34,11 @@ This will open the 'Configure your new project' view, in which you should config 3. Check the option **Place solution and project in the same directory** 4. Click the **Next** button -![fig3](Figures/03.png) +![fig3](figures/03.png) In the final step, 'Additional Information', select **.NET 8.0 (Long-Term Support)** from the 'Framework' dropdown list. Then, click the **Create** button. -![fig4](Figures/04.png) +![fig4](figures/04.png) Your project is now ready. Next, you will create the user interface and then implement the application logic. @@ -52,12 +52,12 @@ To create the user interface proceed as follows: * one ListBox 3. Position the controls as shown in the figure below: -![fig5](Figures/05.png) +![fig5](figures/05.png) 4. Right-click the first label and select the Properties from the context menu 5. In the Properties window, change the (Name) property to **labelArchitecture**: -![fig6](Figures/06.png) +![fig6](figures/06.png) 6. Similarly, rename the ListBox from **listBox1** to **listBoxResults** 7. Modify the properties of the first NumericUpDown control as follows: @@ -221,6 +221,6 @@ When the application is running and the user clicks the Start button, the applic ## Run the application The application is now ready to run. To do this, click 'Debug' > 'Start Debugging'. Visual Studio will then build and launch the application. Afterwards, click 'Start'. Then, change the matrix size to 200 and click the 'Start' button again. Repeat this process for matrix sizes of 300, 400, and 500. 
The results should appear as shown in the following figure: -![fig7](Figures/07.png) +![fig7](figures/07.png) By default, the application uses the Arm64 configuration. For each matrix size, you will observe the computation time. In the next step, you will learn how to change the build configuration to x64. This will allow you to compare the results with those obtained using x64. diff --git a/content/learning-paths/laptops-and-desktops/win_forms/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_forms/how-to-2.md index 60c985f32c..f7d347edc0 100644 --- a/content/learning-paths/laptops-and-desktops/win_forms/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_forms/how-to-2.md @@ -11,30 +11,30 @@ In this section, you will change the build configuration of the application and ## Creating new build configurations To change the build configuration, click the target platform dropdown (by default, it displays 'Any CPU') and select 'Configuration Manager...': -![fig8](Figures/08.png) +![fig8](figures/08.png) In the Configuration Manager, select '' from the Active solution platform dropdown: -![fig9](Figures/09.png) +![fig9](figures/09.png) This will open the 'New Solution Platform' window, where you should select ARM64 from the 'Type or select the new platform' dropdown: -![fig10](Figures/10.png) +![fig10](figures/10.png) Then, click the OK button. Similarly, create the x64 solution platform ## Comparing the performance You will now compare the computation performance on x64 and Arm64 platforms. First, start the application in Release mode and architecture set to x64: -![fig11](Figures/11.png) +![fig11](figures/11.png) Once the application has started, run calculations for the following matrix sizes: 100, 200, 300, 400, and 500. You should see results similar to those in the following figure. -![fig12](Figures/12.png) +![fig12](figures/12.png) Next, launch the application for the Arm64 platform. Run the matrix multiplication for the same matrix sizes as above and observe the computation times: -![fig13](Figures/13.png) +![fig13](figures/13.png) By comparing the execution times, we observe that, on average, Arm64 provides almost a 30% performance improvement over x64. 
diff --git a/content/learning-paths/laptops-and-desktops/win_net8/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_net8/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_net8/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_net8/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_net8/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_net8/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_net8/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_net8/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_net8/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_net8/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_net8/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_net8/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_net8/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_net8/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_net8/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_net8/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_net8/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_net8/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_net8/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_net8/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_net8/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_net8/how-to-1.md index 55abad63df..5c2ac50369 100644 --- a/content/learning-paths/laptops-and-desktops/win_net8/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_net8/how-to-1.md @@ -95,10 +95,10 @@ dotnet run -c Release -f net8.0 The command will build and run the performance benchmarks. The output will look as follows: -![fig1](Figures/01.png) +![fig1](figures/01.png) The output lists all available benchmarks. You can type * to run all benchmarks or type a number to select individual performance tests. In this example, let's type **483** to execute the Perf_Doubles benchmark. After a while you will see a summary of the performance test. A portion of this output is shown in the figure below: -![fig2](Figures/02.png) +![fig2](figures/02.png) By default, the table summarizes the performance test results. You can see each performance test's execution time and statistics (mean, median, minimum, maximum, and standard deviation) which gives you a comprehensive overview of code performance. 
diff --git a/content/learning-paths/laptops-and-desktops/win_net8/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_net8/how-to-2.md index 8047f083d0..1e4aea1b78 100644 --- a/content/learning-paths/laptops-and-desktops/win_net8/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_net8/how-to-2.md @@ -32,7 +32,7 @@ dotnet run -c Release Wait a few moments for the application to execute all the benchmarks and you will then see the results of the following form: -![fig3](Figures/03.png) +![fig3](figures/03.png) You will now compare execution times to x64 architecture (emulator on arm64) by typing: @@ -42,13 +42,13 @@ dotnet run -c Release -a x64 The above command will lead to significantly longer execution times than before: -![fig4](Figures/04.png) +![fig4](figures/04.png) The operations all take more time on the emulated x64 than when you execute them natively on Arm64. On average, native execution provides about a 19% performance improvement for list sorting, 11% for matrix multiplication, and 35% for string operations. The following chart summarizes the mean execution times for x64 and Arm64’s natively executed code. -![fig4](Figures/05.png) +![fig4](figures/05.png) ### Summary In this learning path you have learned how to benchmark .NET applications using both the default and custom benchmarks. By benchmarking the custom code, we demonstrated the performance boost when you natively execute the .NET code on an Arm64-powered device. diff --git a/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/1-dev-env-setup.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/1-dev-env-setup.md new file mode 100644 index 0000000000..2c689167ed --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/1-dev-env-setup.md @@ -0,0 +1,53 @@ +--- +title: Development environment +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Set up your development environment + +In this learning path, you will learn how to build and deploy an LLM on a Windows on Arm (WoA) laptop using ONNX Runtime for inference. + +You will first learn how to build the ONNX Runtime and ONNX Runtime Generate() API library and then how to download the Phi-3 model and run the inference. You will run the short context (4k) mini (3.3B) variant of the Phi-3 model. The short context version accepts shorter (4K) prompts and produces shorter output text than the long (128K) context version, and it consumes less memory. + +Your first task is to prepare a development environment with the required software: + +- Visual Studio 2022 IDE (latest version recommended) +- Python 3.10 or higher +- CMake 3.28 or higher + +The following instructions were tested on a WoA 64-bit Windows machine with at least 16GB of RAM. + +## Install Visual Studio 2022 IDE + +Follow these steps to install and configure Visual Studio 2022 IDE: + +1. Download and install the latest version of [Visual Studio IDE](https://visualstudio.microsoft.com/downloads/). + +2. Select the **Community Version**. An installer called *VisualStudioSetup.exe* will be downloaded. + +3. From your Downloads folder, double-click the installer to start the installation. + +4. Follow the prompts and acknowledge the **License Terms** and **Privacy Statement**. + +5. Once the "Downloaded" and "Installed" stages complete, select your workloads. As a minimum, you should select **Desktop Development with C++**.
This will install the **Microsoft Visual Studio Compiler**, also known as **MSVC**. + +## Install Python + +Download and install [Python for Windows on Arm](/install-guides/py-woa). + +You will need Python version 3.10 or higher. This learning path was tested with version 3.11.9. + +## Install CMake + +CMake is an open-source tool that automates the build process for software projects, helping to generate platform-specific build configurations. + +[Download and install CMake](/install-guides/cmake) + +{{% notice Note %}} +These instructions were tested with version 3.30.5. +{{% /notice %}} + +You now have the required development tools installed to follow this learning path. diff --git a/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/2-build-onnxruntime.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/2-build-onnxruntime.md new file mode 100644 index 0000000000..dba9e29407 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/2-build-onnxruntime.md @@ -0,0 +1,59 @@ +--- +title: Build ONNX Runtime +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Compile ONNX Runtime for Windows on Arm +Now that you have your environment set up correctly, you can build the ONNX Runtime inference engine. + +ONNX Runtime is an open-source inference engine designed to accelerate the deployment of machine learning models, particularly those in the Open Neural Network Exchange (ONNX) format. ONNX Runtime is optimized for high performance and low latency, making it popular for production deployment of AI models. You can learn more by reading the [ONNX Runtime Overview](https://onnxruntime.ai/). + +### Clone ONNX Runtime Repo + +Open a Developer Command Prompt for Visual Studio to properly set up the environment, including paths to the compiler, linker, utilities, and header files. Create your workspace and check out the source tree: + +```bash +cd C:\Users\%USERNAME% +mkdir repos\lp +cd repos\lp +git clone --recursive https://github.com/Microsoft/onnxruntime.git +cd onnxruntime +git checkout 4eeefd7260b7fa42a71dd1a08b423d5e7c722050 +``` + +{{% notice Note %}} +You might be able to use a later commit. These steps have been tested with the commit `4eeefd7260b7fa42a71dd1a08b423d5e7c722050`. +{{% /notice %}} + +### Build for Windows + +You can build the "Release" configuration for a build optimized for performance but without debug information. + + +```bash +.\build.bat --config Release --build_shared_lib --parallel --compile_no_warning_as_error --skip_submodule_sync --skip_tests +``` + + +As an alternative, you can build with the "RelWithDebInfo" configuration for a release-optimized build with debug information.
+ +```bash +.\build.bat --config RelWithDebInfo --build_shared_lib --parallel --compile_no_warning_as_error --skip_submodule_sync --skip_tests +``` + + +### Resulting Dynamic Link Library +When the build is complete, the `onnxruntime.dll` dynamic link library can be found in: + +``` +dir .\build\Windows\Release\Release\onnxruntime.dll +``` + +or, if you built with debug information, it can be found in: + +``` +dir .\build\Windows\RelWithDebInfo\RelWithDebInfo\onnxruntime.dll +``` diff --git a/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/3-build-onnxruntime-generate-api.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/3-build-onnxruntime-generate-api.md new file mode 100644 index 0000000000..ab91cb0146 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/3-build-onnxruntime-generate-api.md @@ -0,0 +1,54 @@ +--- +title: Build ONNX Runtime Generate() API +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Compile the ONNX Runtime Generate() API for Windows on Arm + +The Generate() API in ONNX Runtime is designed for text generation tasks using models like Phi-3. It implements the generative AI loop for ONNX models, including: +- pre- and post-processing +- inference with ONNX Runtime +- logits processing +- search and sampling +- KV cache management + +You can learn more by reading the [ONNX Runtime Generate() API page](https://onnxruntime.ai/docs/genai/). + +In this section, you will learn how to build the Generate() API from source. + + +### Clone onnxruntime-genai Repo +Within your Windows Developer Command Prompt for Visual Studio, check out the source repo: + +```bash +cd C:\Users\%USERNAME% +cd repos\lp +git clone https://github.com/microsoft/onnxruntime-genai +cd onnxruntime-genai +git checkout b2e8176c99473afb726d364454dc827d2181cbb2 +``` + +{{% notice Note %}} +You might be able to use later commits. These steps have been tested with the commit `b2e8176c99473afb726d364454dc827d2181cbb2`. +{{% /notice %}} + +### Build for Windows on Arm +The build command below has a --config argument, which takes the following options: +- ```Release``` builds a release build +- ```Debug``` builds binaries with debug symbols +- ```RelWithDebInfo``` builds release binaries with debug info + +You will build the `Release` variant of the ONNX Runtime Generate() API: + +```bash +pip install requests +python build.py --config Release --skip_tests +``` + +When the build is complete, confirm the ONNX Runtime Generate() API dynamic link library has been created: + +```bash +dir build\Windows\Release\Release\onnxruntime-genai.dll +``` diff --git a/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/4-run-benchmark-on-WoA.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/4-run-benchmark-on-WoA.md new file mode 100644 index 0000000000..5019c0ff53 --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/4-run-benchmark-on-WoA.md @@ -0,0 +1,101 @@ +--- +title: Run a Phi-3 model on a Windows on Arm machine +weight: 5 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Run a Phi-3 model on your Windows on Arm machine + +In this section, you will learn how to download the Phi-3-mini model and run it on your Windows on Arm machine (physical or virtual). You will use a simple model runner program that provides performance metrics.
+ +The Phi-3-mini (3.3B) model has a short (4k) context version and a long (128k) context version. The long context version accepts much longer prompts and produces longer output text, but it consumes more memory. +In this learning path, you will use the short context version, which is quantized to 4 bits. + +The Phi-3-mini model used here is in ONNX format. + +### Setup + +[Phi-3 ONNX models](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) are hosted on Hugging Face. +Hugging Face uses Git for version control, and the ONNX model files can be quite large. +You will first need to install the Git Large File Storage (LFS) extension. + +``` bash +winget install -e --id GitHub.GitLFS +git lfs install +``` +If you don’t have winget, download and run the exe from the [official source](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage?platform=windows). +If the extension is already installed, the ``git lfs install`` command above will report ``Git LFS initialized``. + +You then need to install the ``HuggingFace CLI``: + +``` bash +pip install huggingface-hub[cli] +``` + +### Download the Phi-3-mini (4k) model + +``` bash +cd C:\Users\%USERNAME% +cd repos\lp +huggingface-cli download microsoft/Phi-3-mini-4k-instruct-onnx --include cpu_and_mobile/cpu-int4-rtn-block-32-acc-level-4/* --local-dir . +``` +This command downloads the model into a folder called `cpu_and_mobile`. + +### Build the model runner (ONNX Runtime GenAI C example) +In the previous section, you built the ONNX Runtime Generate() API from source. +The headers and dynamic link libraries that you built need to be copied to the appropriate folders (``lib`` and ``include``). +Building from source is good practice because the examples are usually updated to work with the latest changes. + +``` bash +copy onnxruntime\build\Windows\Release\Release\onnxruntime.* onnxruntime-genai\examples\c\lib +cd onnxruntime-genai +copy build\Windows\Release\Release\onnxruntime-genai.* examples\c\lib +copy src\ort_genai.h examples\c\include\ +copy src\ort_genai_c.h examples\c\include\ +``` + +You can now build the model runner executable in the `onnxruntime-genai` folder using the commands below: + +``` bash +cd examples/c +cmake -A arm64 -S . -B build -DPHI3=ON +cd build +cmake --build . --config Release +``` + +After a successful build, a binary program called `phi3.exe` will be created in the `examples\c\build\Release` folder: +```bash +dir Release\phi3.exe +``` + +#### Run the model + +Use the runner you just built to execute the model with the following commands: + +``` bash +cd C:\Users\%USERNAME% +cd repos\lp +.\onnxruntime-genai\examples\c\build\Release\phi3.exe .\cpu_and_mobile\cpu-int4-rtn-block-32-acc-level-4\ cpu +``` + +The runner program loads the model and then prompts you to enter a text prompt, as shown: + +```output +------------- +Hello, Phi-3! +------------- +C++ API +Creating config... +Creating model... +Creating tokenizer... +Prompt: (Use quit() to exit) Or (To terminate current output generation, press Ctrl+C) +``` + +After you enter your prompt, the text generated by the model is displayed. On completion, performance metrics similar to those shown below are printed: + +```output +Prompt length: 64, New tokens: 931, Time to first: 1.79s, Prompt tokens per second: 35.74 tps, New tokens per second: 6.34 tps +``` +You have successfully run the Phi-3 model on your Windows device powered by Arm.
diff --git a/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/_index.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/_index.md new file mode 100644 index 0000000000..81ead9d54b --- /dev/null +++ b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/_index.md @@ -0,0 +1,55 @@ +--- +title: Run Phi-3 on a Windows on Arm machine with ONNX Runtime + +draft: true +cascade: + draft: true + +minutes_to_complete: 60 + +who_is_this_for: A deep-dive for advanced developers looking to build ONNX Runtime on Windows on Arm (WoA) and leverage the Generate() API to run Phi-3 inference with KleidiAI acceleration. + +learning_objectives: + - Build ONNX Runtime and ONNX Runtime Generate() API for Windows on Arm. + - Run a Phi-3 model using ONNX Runtime on a Windows on Arm laptop. + +prerequisites: + - A Windows on Arm computer such as the Lenovo Thinkpad X13 running Windows 11 or a Windows on Arm [virtual machine](https://learn.arm.com/learning-paths/cross-platform/woa_azure/) + +author: Barbara Corriero + +### Tags +skilllevels: Advanced +subjects: ML +armips: + - Cortex-A + - Cortex-X +tools_software_languages: + - Visual Studio + - C++ + - Python + - Git + - cmake +operatingsystems: + - Windows + +further_reading: + - resource: + title: ONNX Runtime + link: https://onnxruntime.ai/docs/ + type: documentation + - resource: + title: ONNX Runtime generate() API + link: https://onnxruntime.ai/docs/genai/ + type: documentation + - resource: + title: Accelerating AI Developer Innovation Everywhere with New Arm Kleidi + link: https://newsroom.arm.com/blog/arm-kleidi + type: blog + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
+--- diff --git a/content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_next-steps.md b/content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/_next-steps.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/PMUv3_plugin_learning_path/_next-steps.md rename to content/learning-paths/laptops-and-desktops/win_on_arm_build_onnxruntime/_next-steps.md diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_winui3/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_winui3/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_winui3/figures/09.png diff --git 
a/content/learning-paths/laptops-and-desktops/win_winui3/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_winui3/how-to-1.md index 77022c040c..5d8d88f8e2 100644 --- a/content/learning-paths/laptops-and-desktops/win_winui3/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_winui3/how-to-1.md @@ -25,20 +25,20 @@ Before you begin the implementation, install Visual Studio 2022 with the followi 2. Universal Windows Platform development 3. After selecting these workloads, expand the .NET desktop development group under the Installation details and ensure that the 'Windows App SDK C# Templates' option is selected. -![fig1](Figures/01.png) +![fig1](figures/01.png) Then, click the 'Individual components' tab and check '.NET 6.0 Runtime (Long Term Support)'. -![fig2](Figures/02.png) +![fig2](figures/02.png) ## Create the project Open Visual Studio and click 'Create a new project'. -![fig3](Figures/03.png) +![fig3](figures/03.png) In the next window, search for the 'Blank App, Packaged (WinUI 3 in Desktop)' template. Select this template and click the 'Next' button. -![fig4](Figures/04.png) +![fig4](figures/04.png) This action opens the 'Configure your new project' window. Here, you should: @@ -47,7 +47,7 @@ This action opens the 'Configure your new project' window. Here, you should: 3. Ensure the 'Place solution and project in the same directory' option is checked 4. Click the 'Create' button -![fig5](Figures/05.png) +![fig5](figures/05.png) Your project should now be ready. Next, you will design the view using XAML declarations and implement the logic using the C# code. @@ -116,7 +116,7 @@ There are four style declarations, which differ by the 'TargetType' attribute. T Next, you will declare the following user interface: -![fig6](Figures/06.png) +![fig6](figures/06.png) This view uses a tabular layout, comprising five rows and two columns. To create such a layout using XAML, you can use the Grid control. Open the `MainWindow.xaml` file and modify it as follows: diff --git a/content/learning-paths/laptops-and-desktops/win_winui3/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_winui3/how-to-2.md index 695e5ad52a..f0c5818261 100644 --- a/content/learning-paths/laptops-and-desktops/win_winui3/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_winui3/how-to-2.md @@ -11,18 +11,18 @@ In this section, you will launch the application using different settings to com ## Launching the application To run the application, use the dropdown lists in Visual Studio: -![fig7](Figures/07.png) +![fig7](figures/07.png) Ensure you change the 'Configuration mode' to 'Release'. Then, select the architecture, either 'x64' or 'ARM64', and click on 'Arm64.WinUIApp (Package)'. ## Compare the performance Now, you will compare the computation performance on x64 and ARM64 platforms. First, launch the application for x64. After it starts, perform calculations for the following matrix sizes: 100, 200, 300, 400, and 500. The results should resemble those in the figure below: -![fig8](Figures/08.png) +![fig8](figures/08.png) Next, launch the application for the ARM64 platform. Execute matrix multiplication for the same matrix sizes as above and note the computation times: -![fig9](Figures/09.png) +![fig9](figures/09.png) Upon comparing the execution times, it is seen that ARM64, on average, provides almost a 50% performance improvement over x64. 
diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/05.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/09.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/10.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/10.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/10.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/10.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/11.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/11.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/11.png rename to 
content/learning-paths/laptops-and-desktops/win_wpf/figures/11.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/12.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/12.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/12.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/12.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/Figures/13.png b/content/learning-paths/laptops-and-desktops/win_wpf/figures/13.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_wpf/Figures/13.png rename to content/learning-paths/laptops-and-desktops/win_wpf/figures/13.png diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_wpf/how-to-1.md index adb19efc42..015106fd51 100644 --- a/content/learning-paths/laptops-and-desktops/win_wpf/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_wpf/how-to-1.md @@ -22,11 +22,11 @@ You can find the complete project code used in this learning path [here](https:/ ## Create the project You will now create the WPF project. Open Visual Studio and click **Create a new project**: -![fig1](Figures/01.png) +![fig1](figures/01.png) In the next window, search for the WPF Application template: -![fig2](Figures/02.png) +![fig2](figures/02.png) This will open the 'Configure your new project' view, in which you should configure the project as follows: @@ -35,11 +35,11 @@ This will open the 'Configure your new project' view, in which you should config 3. Check the option **Place solution and project in the same directory** 4. Click the **Next** button -![fig3](Figures/03.png) +![fig3](figures/03.png) In the last step, under Additional Information, choose the **.NET 8.0 (Long Term Support)** option. Finally, click the **Create** button. -![fig4](Figures/04.png) +![fig4](figures/04.png) Your project should be now ready. @@ -56,7 +56,7 @@ Proceed as follows: 1. Open the Solution Explorer in Visual Studio, right-click on the Arm64.DesktopApp.WPF project, and then select Add -> New Folder. Rename the folder to Helpers. 2. Right-click the Helpers folder, and select Add -> New Item... This will open the Add New Item window, where you should type PerformanceHelper.cs and click the Add button: -![fig5](Figures/05.png) +![fig5](figures/05.png) 3. Modify **PerformanceHelper.cs** as follows: ```cs @@ -143,7 +143,7 @@ Before starting the UI design, install the Syncfusion.SfChart.WPF NuGet package 2. In Solution Explorer, right-click on Dependencies under the Arm64.DesktopApp.WPF project. From the context menu, select Manage NuGet Packages... to open the NuGet Package Manager. 3. In the NuGet Package Manager, search for Syncfusion Chart WPF. Then select Syncfusion.SfChart.WPF from the results and click the Install button. Refer to the figure below for guidance: -![fig6](Figures/06.png) +![fig6](figures/06.png) 4. A Preview Changes window will appear. Click the Apply button to confirm and proceed with the changes to the solution. The installation of the NuGet package will complete. @@ -288,7 +288,7 @@ The chart is configured with two numerical axes. The horizontal (primary) axis d The complete XAML declaration results in the following view: -![fig7](Figures/07.png) +![fig7](figures/07.png) ## Application Logic You will now implement the application logic so that the computations will be launched when the user clicks the **Run calculations** button. 
Follow these steps: diff --git a/content/learning-paths/laptops-and-desktops/win_wpf/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_wpf/how-to-2.md index afca1b3898..af83d4c117 100644 --- a/content/learning-paths/laptops-and-desktops/win_wpf/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_wpf/how-to-2.md @@ -14,29 +14,29 @@ To prepare an application for ARM64 and x86_64 builds, proceed as follows: 1. Click the **Any CPU** drop-down menu and select Configuration Manager...: -![fig8](Figures/08.png) +![fig8](figures/08.png) 2. In the Configuration Manager, click New... from the **Active Solution Platform** drop-down menu: -![fig9](Figures/09.png) +![fig9](figures/09.png) 3. In the **New Solution Platform** window, select ARM64, and click the OK button, as shown below: -![fig10](Figures/10.png) +![fig10](figures/10.png) 4. Repeat these steps to create the x86_64 solution platform. To build and run the application, select the **solution platform** (for example, x86_64) and then click the **Arm64.Desktop.WPF button**: -![fig11](Figures/11.png) +![fig11](figures/11.png) When the application launches, type the execution count value, e.g., 2000 and click the "Run calculations" button. The application will display the column with the label denoting the computation time. Repeat this for other execution counts: 4000, 6000, 8000 and 10000. Your results should look like the following figure: -![fig12](Figures/12.png) +![fig12](figures/12.png) Now, relaunch the application for the ARM64 solution platform and observe the computation times: -![fig13](Figures/13.png) +![fig13](figures/13.png) On average, we have achieved about a 30% reduction in computation times. diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/01.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/01.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/01.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/01.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/02.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/02.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/02.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/02.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/03.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/03.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/03.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/03.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/04.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/04.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/04.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/04.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/05.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/05.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/05.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/05.png diff --git 
a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/06.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/06.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/06.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/06.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/07.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/07.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/07.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/07.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/08.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/08.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/08.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/08.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/09.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/09.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/09.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/09.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/10.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/10.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/10.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/10.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/11.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/11.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/11.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/11.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/12.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/12.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/12.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/12.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/13.png b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/13.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/win_xamarin_forms/Figures/13.png rename to content/learning-paths/laptops-and-desktops/win_xamarin_forms/figures/13.png diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-1.md b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-1.md index e09c46bde6..8dcfc92c55 100644 --- a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-1.md +++ b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-1.md @@ -21,11 +21,11 @@ Before you begin the implementation, install Visual Studio 2022 with the followi ## Create the project Open Visual Studio and click 'Create a new project'. 
-![fig1](Figures/01.png) +![fig1](figures/01.png) In the next window, search for the 'Mobile App (Xamarin.Forms)' template. -![fig2](Figures/02.png) +![fig2](figures/02.png) This will open the 'Configure your new project' view, in which you should configure the project as follows (refer to the figure below): @@ -34,21 +34,21 @@ This will open the 'Configure your new project' view, in which you should config 3. Check the option **Place solution and project in the same directory** 4. Click the **Create** button -![fig3](Figures/03.png) +![fig3](figures/03.png) In the last step, 'New Mobile App', choose the **Blank** option in the 'Select a template for your app' list. Then, check the Android, iOS and Windows (UWP) boxes under the 'I plan to develop for:'. Finally, click the **Create** button. -![fig4](Figures/04.png) +![fig4](figures/04.png) The project creation process may take a few moments. During this process, you might be prompted to enable Developer Mode for Windows. If so, follow the instructions shown to enable Developer Mode: -![fig5](Figures/05.png) +![fig5](figures/05.png) Then, accept any other license terms that appear, including Android SDK - License Agreement. Your project should be now ready. Next, open the Solution Explorer (View -> Solution Explorer) to view the created projects: -![fig6](Figures/06.png) +![fig6](figures/06.png) There are four projects: 1. Arm64.MobileApp.XamarinForms - this is the project, containing the shared code. This project is referenced by other projects. @@ -69,7 +69,7 @@ Before starting the UI design, install the `Syncfusion.SfChart.WPF` NuGet packag 2. In Solution Explorer, right-click on 'Dependencies' under the Arm64.MobileApp.XamarinForms project. From the context menu, select 'Manage NuGet Packages...' to open the NuGet Package Manager. 3. In the NuGet Package Manager, search for 'Sf chart.' Then select 'Syncfusion.SfChart.WPF' from the results and click the 'Install' button. Refer to the figure below for guidance: -![fig7](Figures/07.png) +![fig7](figures/07.png) 4. A 'Preview Changes' window will appear. Click the 'Apply' button to confirm and proceed with the changes to the solution. The installation of the NuGet package will complete. 5. Finally, in the 'License Acceptance' window that appears, click the 'I Accept' button to agree to the terms and complete the installation process. @@ -166,7 +166,7 @@ The chart is configured with two numerical axes. The horizontal (primary) axis d The complete XAML declaration results in the following view: -![fig8](Figures/08.png) +![fig8](figures/08.png) To use the chart control on UWP you will also need to make one more modification: 1. In Solution Explorer go to `Arm64.MobileApp.XamarinForms.UWP` project. diff --git a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-2.md b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-2.md index 1d28148c70..e89d2ae731 100644 --- a/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-2.md +++ b/content/learning-paths/laptops-and-desktops/win_xamarin_forms/how-to-2.md @@ -294,15 +294,15 @@ You can now run the application to demonstrate the performance differences betwe 1. Click on the platform dropdown in Visual Studio and select 'Configuration Manager...' -![fig9](Figures/09.png) +![fig9](figures/09.png) 2. In the 'Configuration Manager' window, open the 'Active solution platform' dropdown and select -![fig10](Figures/10.png) +![fig10](figures/10.png) 3. 
In the 'New Solution Platform' dialog, choose 'ARM64' from the list and click the 'OK' button -![fig11](Figures/11.png) +![fig11](figures/11.png) After setting up both platforms, start by running the application using the x64 platform: @@ -311,7 +311,7 @@ After setting up both platforms, start by running the application using the x64 Now, launch the application using x64 platform. Click the Run calculations button several times, and then click the Plot results button. You should get the following: -![fig12](Figures/12.png) +![fig12](figures/12.png) Next, switch to the ARM64 platform and repeat the process: @@ -320,7 +320,7 @@ Next, switch to the ARM64 platform and repeat the process: You should notice shorter computation times on the ARM64 platform, demonstrating its performance advantage: -![fig13](Figures/13.png) +![fig13](figures/13.png) ## Summary In this learning path, you developed an application using Xamarin.Forms, allowing it to run on multiple platforms while sharing the same codebase. The application included functionality to measure and compare computation times, specifically focusing on matrix multiplication performance. By running the application on both x64 and Arm64 platforms, you were able to observe and compare the performance. The results showed a noticeable performance advantage on the Arm64 architecture, highlighting its efficiency in handling computation-intensive tasks. diff --git a/content/learning-paths/laptops-and-desktops/windows_armpl/2a-get-set-up-git.md b/content/learning-paths/laptops-and-desktops/windows_armpl/2a-get-set-up-git.md index 23f85cfe6a..18908c6bce 100644 --- a/content/learning-paths/laptops-and-desktops/windows_armpl/2a-get-set-up-git.md +++ b/content/learning-paths/laptops-and-desktops/windows_armpl/2a-get-set-up-git.md @@ -19,7 +19,7 @@ The example application renders a rotating 3D cube to perform the calculations u First, navigate to an empty directory, and clone the repository containing the example from GitHub: ```cmd -git clone https://github.com/odincodeshen/SpinTheCubeInGDI.git +git clone https://github.com/arm/SpinTheCubeInGDI.git ``` {{% notice Note %}} diff --git a/content/learning-paths/laptops-and-desktops/windows_armpl/_index.md b/content/learning-paths/laptops-and-desktops/windows_armpl/_index.md index 4d7fffb5e8..8c5f809893 100644 --- a/content/learning-paths/laptops-and-desktops/windows_armpl/_index.md +++ b/content/learning-paths/laptops-and-desktops/windows_armpl/_index.md @@ -23,6 +23,7 @@ tools_software_languages: - Visual Studio - C# - .NET + - Arm Performance Libraries operatingsystems: - Windows diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/actions.png b/content/learning-paths/laptops-and-desktops/windows_cicd_github/images/actions.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/actions.png rename to content/learning-paths/laptops-and-desktops/windows_cicd_github/images/actions.png diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_1.png b/content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_1.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_1.png rename to content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_1.png diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_2.png 
b/content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_2.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_2.png rename to content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_2.png diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_3.png b/content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_3.png similarity index 100% rename from content/learning-paths/laptops-and-desktops/windows_cicd_github/Images/ghrunner_3.png rename to content/learning-paths/laptops-and-desktops/windows_cicd_github/images/ghrunner_3.png diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-run.md b/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-run.md index 75d124cf81..4452e19a5c 100644 --- a/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-run.md +++ b/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-run.md @@ -13,7 +13,7 @@ To test this CI/CD workflow, select `Actions` from the repository menu. You will be presented with the option to start with a `Simple workflow` that gives you a file with the minimum necessary structure to build, test and deploy your code. Click `Configure`. -![ghrunner3 #center](Images/ghrunner_3.png) +![ghrunner3 #center](images/ghrunner_3.png) A file named `blank.yml` will be created for you under `/.github/workflows/`. Optionally rename this to something more meaningful (`hello.yml`). @@ -44,6 +44,6 @@ In the `PowerShell` window, you will see the progress of the Action: ``` Similarly in the `Actions` tab of your repository: -![actions #center](Images/actions.png) +![actions #center](images/actions.png) Congratulations, you have successfully run a simple GitHub Actions CI/CD workflow on your Windows on Arm machine. diff --git a/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-setup.md b/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-setup.md index e70e63debf..31aa7c22d8 100644 --- a/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-setup.md +++ b/content/learning-paths/laptops-and-desktops/windows_cicd_github/runner-setup.md @@ -40,6 +40,6 @@ Current runner version: '2.303.0' Return to the `Actions->Runners` page in your GitHub repository and you should now see your runner displayed similar to the below: -![ghrunner2 #center](Images/ghrunner_2.png) +![ghrunner2 #center](images/ghrunner_2.png) You are now ready to test a simple CI/CD GitHub Actions workflow on your Windows on Arm runner. 
diff --git a/content/learning-paths/mobile-graphics-and-gaming/_index.md b/content/learning-paths/mobile-graphics-and-gaming/_index.md index 205be57b6a..ee87a4b1d2 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/_index.md +++ b/content/learning-paths/mobile-graphics-and-gaming/_index.md @@ -11,14 +11,14 @@ key_ip: maintopic: true operatingsystems_filter: - Android: 28 -- Linux: 23 +- Linux: 24 - macOS: 10 - Windows: 11 subjects_filter: - Gaming: 6 - Graphics: 6 - ML: 10 -- Performance and Architecture: 26 +- Performance and Architecture: 27 subtitle: Optimize Android apps and build faster games using cutting-edge Arm tech title: Mobile, Graphics, and Gaming tools_software_languages_filter: @@ -43,7 +43,7 @@ tools_software_languages_filter: - Coding: 16 - ExecuTorch: 1 - Frame Advisor: 1 -- GCC: 9 +- GCC: 10 - GenAI: 2 - GoogleTest: 1 - Java: 5 @@ -65,7 +65,7 @@ tools_software_languages_filter: - PyTorch: 1 - QEMU: 1 - RME: 1 -- Runbook: 14 +- Runbook: 15 - Rust: 2 - SDDiskTool: 1 - SVE2: 1 diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_Sphinx.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_Sphinx.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_Sphinx.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_Sphinx.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_render_graph_1.1.gif b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_render_graph_1.1.gif similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_render_graph_1.1.gif rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_render_graph_1.1.gif diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_step_drawcalls.gif b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_step_drawcalls.gif similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/FA_step_drawcalls.gif rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/FA_step_drawcalls.gif diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Pillars.gif b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Pillars.gif similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Pillars.gif rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Pillars.gif diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt_redundant_attachments.png 
b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt_redundant_attachments.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt_redundant_attachments.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt_redundant_attachments.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt_redundant_rps.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt_redundant_rps.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/Render_graph_egypt_redundant_rps.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/Render_graph_egypt_redundant_rps.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/_index.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/_index.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/_index.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/_index.md diff --git a/content/learning-paths/servers-and-cloud-computing/funASR/_next-steps.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/_next-steps.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/funASR/_next-steps.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/_next-steps.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze_geometry.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze_geometry.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze_geometry.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze_geometry.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze_render_graph.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze_render_graph.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/analyze_render_graph.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/analyze_render_graph.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/capture_a_trace.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/capture_a_trace.md similarity index 100% rename from 
content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/capture_a_trace.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/capture_a_trace.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_analysis.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_analysis.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_analysis.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_analysis.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_capture.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_capture.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_capture.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_capture.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_connect.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_connect.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_connect.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_connect.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_content_metrics.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_content_metrics.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_content_metrics.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_content_metrics.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_detailed_snakes.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_detailed_snakes.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_detailed_snakes.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_detailed_snakes.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_detailed_sphinx.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_detailed_sphinx.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_detailed_sphinx.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_detailed_sphinx.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_frame_hierarchy.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_frame_hierarchy.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_frame_hierarchy.png rename to 
content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_frame_hierarchy.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_launch_screen.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_launch_screen.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_launch_screen.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_launch_screen.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_show_descriptions.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_show_descriptions.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_show_descriptions.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_show_descriptions.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_snakes.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_snakes.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_snakes.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_snakes.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_sort_vse.png b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_sort_vse.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/fa_sort_vse.png rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/fa_sort_vse.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/feedback.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/feedback.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/feedback.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/feedback.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/what_is_frame_advisor.md b/content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/what_is_frame_advisor.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Analyze_a_frame_with_Frame_Advisor/what_is_frame_advisor.md rename to content/learning-paths/mobile-graphics-and-gaming/analyze_a_frame_with_frame_advisor/what_is_frame_advisor.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/add-camera.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/add-camera.md index 08fbb56ed8..9e541e40ef 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/add-camera.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/add-camera.md @@ -88,7 +88,7 @@ To modify the application view, open `activity_main.xml` and replace the file co After adding this code, re-run the application. 
The above declarations render the following: -![img8](Figures/08.png) +![img8](figures/08.png) The view now contains two buttons: Start and Stop. They will be used to start and stop the camera preview. Below the buttons, there is a checkbox that will enable or disable real-time image processing. Though not visible, below the checkbox there is a `JavaCameraView`. @@ -145,7 +145,7 @@ override fun onCreate(savedInstanceState: Bundle?) { The above code checks if the app has the CAMERA permission. If the permission is not granted, it requests the permission using `ActivityCompat.requestPermissions`, passing the `Manifest.permission.CAMERA` and the cameraPermissionRequestCode (100). When you re-run the application, you should see the following screen: -![img9](Figures/09.png) +![img9](figures/09.png) Click the **While using the app** button to enable camera access. You are now ready to capture images from the camera. @@ -392,7 +392,7 @@ class MainActivity : AppCompatActivity(), CameraBridgeViewBase.CvCameraViewListe After re-launching the application and pressing the Start button, you will see that the application is accessing the camera. However, the images are not yet displayed. -![img10](Figures/10.png) +![img10](figures/10.png) ## Display images To display images, you need to modify the onCameraFrame method so that the image from the camera is displayed in the ImageView. You will also declare Mat objects to store the image from the camera. @@ -458,4 +458,4 @@ To test the code, you will need to run the application on an actual device. Foll After running the app, remember to grant permissions to the camera. Then, click the Start button, and the application will display images from the camera: -![img11](Figures/11.jpg) +![img11](figures/11.jpg) diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/create-project.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/create-project.md index aeda0c3260..51bca84c0c 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/create-project.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/create-project.md @@ -13,11 +13,11 @@ Follow these steps to create a project and add OpenCV: 1. Open Android Studio on your development machine and then click the **+ New Project** icon: -![img1](Figures/01.png) +![img1](figures/01.png) 2. In the New Project window, select **Empty Views Activity**: -![img2](Figures/02.png) +![img2](figures/02.png) 3. Configure the project as follows (see figure below): - Name: **Arm64.OpenCV.Camera**. @@ -27,13 +27,13 @@ Follow these steps to create a project and add OpenCV: - Minimum SDK: **API 24**. - Build configuration language: **Kotlin DSL**. -![img3](Figures/03.png) +![img3](figures/03.png) 4. Click the **Finish** button. The project will be ready in a few moments, and Android Studio should appear as shown below: -![img4](Figures/04.png) +![img4](figures/04.png) ## Add OpenCV support To add OpenCV for Arm64, open the *build.gradle.kts (Module: app)*, and add the following line under the dependencies: @@ -44,7 +44,7 @@ implementation("org.opencv:opencv:4.10.0") Then, click the **Sync Now** link in the top pane that appears: -![img5](Figures/05.png) +![img5](figures/05.png) From here on, you can use OpenCV in your application. @@ -56,7 +56,7 @@ To initialize OpenCV and check the initialization status, follow these steps: 2. Click the highlighted icon in the top-right corner to switch to the XML view. 
-![img6](Figures/06.png) +![img6](figures/06.png) 3. Modify the `activity_main.xml` file as shown below: @@ -129,6 +129,6 @@ Finally, you call updateControls(), which updates the UI based on whether OpenCV ## Launch the application You can now launch the application in the simulator. To do so, use the top menu, where you click Run -> Run ‘app’. The application should start, and you should see the “OpenCV initialized” text as shown below: -![img7](Figures/07.png) +![img7](figures/07.png) You have successfully added OpenCV to the Android application. In the next step, you will extend the application to get images from the device’s camera and process them in real-time. diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/01.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/01.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/01.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/01.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/02.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/02.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/02.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/02.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/03.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/03.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/03.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/03.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/04.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/04.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/04.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/04.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/05.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/05.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/05.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/05.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/06.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/06.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/06.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/06.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/07.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/07.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/07.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/07.png diff --git 
a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/08.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/08.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/08.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/08.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/09.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/09.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/09.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/09.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/10.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/10.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/10.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/10.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/11.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/11.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/11.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/11.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/12.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/12.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/Figures/12.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/figures/12.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/process-images.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/process-images.md index c8fb21d2c0..3984ff22b6 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/process-images.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_camera/process-images.md @@ -88,5 +88,5 @@ You use 21 as the size of the block to use for calculating the threshold for eac After re-running the application, starting the preview, and enabling the processing, you should see the following result: -![img12](Figures/12.jpg) +![img12](figures/12.jpg) diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/create-project.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/create-project.md index e007a76564..c53ac71805 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/create-project.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/create-project.md @@ -16,11 +16,11 @@ Follow these steps to create a project and add OpenCV: 1. Open Android Studio on your development machine and then click the **+ New Project** icon: -![img1](Figures/01.png) +![img1](figures/01.png) 2. In the New Project window, select **Empty Views Activity**: -![img2](Figures/02.png) +![img2](figures/02.png) 3. 
Configure the project as follows (see figure below): - Name: **Arm64.OpenCV.FaceDetection**. @@ -30,7 +30,7 @@ Follow these steps to create a project and add OpenCV: - Minimum SDK: **API 24**. - Build configuration language: **Kotlin DSL**. -![img3](Figures/03.png) +![img3](figures/03.png) 4. Click the **Finish** button. @@ -51,7 +51,7 @@ You will now initialize OpenCV and prepare the application to stream frames from 1. Under the Project in the left window, double-click *app/res/layout/activity_main.xml*. This opens the view designer. 2. Click the highlighted icon in the top right corner to switch to the XML view. -![img4](Figures/04.png) +![img4](figures/04.png) 3. Modify the *activity_main.xml* as shown below: @@ -218,7 +218,7 @@ The above code also declares CameraBridgeViewBase.CvCameraViewListener2 methods: ## Launch the application You have just prepared the application to stream camera frames. You can now launch the application in the emulator. To do so, click the green 'run' button as shown here: -![click run](Figures/05.png) +![click run](figures/05.png) The application should start, and you should see the app running in an emulator. It will ask you for camera permissions. diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/face-detection.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/face-detection.md index f68a8e362b..44d3e030b1 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/face-detection.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/face-detection.md @@ -194,7 +194,7 @@ If at least one face is detected, you retrieve the first detected face, and base After running the application, click the Start button, and you will see a green rectangle around the detected face as shown below: -![img5](Figures/06.jpg) +![img5](figures/06.jpg) ## Summary In this Learning Path, you learned how to use the Haar cascade classifier to detect faces in camera images. You added the pre-trained Haar cascade XML file for face detection to the project’s assets directory. You implemented the loadHaarCascade method to load the Haar cascade file from the assets and initialize the CascadeClassifier. You also created the getPath method to convert the Haar cascade asset file into a file path that can be used by OpenCV. 
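As a companion to that summary, the snippet below is a minimal Kotlin sketch of the detection step using the OpenCV Java bindings. It assumes a `CascadeClassifier` has already been constructed from the Haar cascade file path, and the function name `detectAndMarkFace` is illustrative rather than the learning path's exact code.

```kotlin
import org.opencv.core.Mat
import org.opencv.core.MatOfRect
import org.opencv.core.Scalar
import org.opencv.imgproc.Imgproc
import org.opencv.objdetect.CascadeClassifier

// Illustrative sketch: detect faces in a grayscale frame and outline the first one on the color frame.
fun detectAndMarkFace(classifier: CascadeClassifier, gray: Mat, rgba: Mat): Mat {
    val faces = MatOfRect()
    classifier.detectMultiScale(gray, faces)          // run the Haar cascade detector
    faces.toArray().firstOrNull()?.let { face ->      // the summary above works with the first detection
        Imgproc.rectangle(rgba, face.tl(), face.br(), Scalar(0.0, 255.0, 0.0), 3)
    }
    return rgba                                       // frame with a green rectangle drawn on it
}
```

In an application like the one described, a call of this kind would typically run on each camera frame before the frame is displayed.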
diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/01.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/01.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/01.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/01.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/02.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/02.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/02.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/02.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/03.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/03.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/03.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/03.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/04.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/04.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/04.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/04.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/05.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/05.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/05.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/05.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/06.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/06.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/Figures/06.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_facedetection/figures/06.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/create-project.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/create-project.md index 9b10a48d17..dd95ee50be 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/create-project.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/create-project.md @@ -16,7 +16,7 @@ Start by creating a new project, and configuring it: 1. Open Android Studio on your development machine, and click the **+ New Project** icon. 2. In the **New Project** window, select **Empty Views Activity**: -![img1 alt-text#center](Figures/01.png "Figure 1: Creating a new project.") +![img1 alt-text#center](figures/01.png "Figure 1: Creating a new project.") 3. Figure 2 shows you how to configure the project: - Name: **Arm64.KleidiCV.Demo**. 
@@ -26,7 +26,7 @@ Start by creating a new project, and configuring it: - Minimum SDK: **API 24**. - Build configuration language: **Kotlin DSL**. -![img2 alt-text#center](Figures/02.png "Figure 2: Configuring your new project.") +![img2 alt-text#center](figures/02.png "Figure 2: Configuring your new project.") 4. Click the **Finish** button. diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/01.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/01.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/01.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/01.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/02.png b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/02.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/02.png rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/02.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/03.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/03.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/03.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/03.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/04.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/04.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/04.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/04.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/05.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/05.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/05.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/05.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/06.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/06.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/06.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/06.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/07.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/07.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/07.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/07.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/08.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/08.jpg similarity index 100% rename from 
content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/08.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/08.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/09.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/09.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/09.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/09.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/10.jpg b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/10.jpg similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/Figures/10.jpg rename to content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/figures/10.jpg diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/process-images.md b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/process-images.md index 325f92f285..fadf9d4fbc 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/process-images.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_opencv_kleidicv/process-images.md @@ -398,10 +398,10 @@ When you do so, click the **Load image** button, select the image processing ope You will see the processing results and a detailed performance analysis as Figures 3-6 show. As a reminder, this Learning Path was tested on a Samsung Galaxy S22. -![img3](Figures/03.jpg) -![img4](Figures/04.jpg) -![img5](Figures/05.jpg) -![img6](Figures/06.jpg) +![img3](figures/03.jpg) +![img4](figures/04.jpg) +![img5](figures/05.jpg) +![img6](figures/06.jpg) ## Performance uplift To appreciate the performance uplift offered by KleidiCV, switch to an earlier OpenCV version that does not include KleidiCV. @@ -420,10 +420,10 @@ implementation("org.opencv:opencv:4.9.0") Now click the **Sync Now** button, and deploy the app to the Android device. 
-![img7](Figures/07.jpg) -![img8](Figures/08.jpg) -![img9](Figures/09.jpg) -![img10](Figures/10.jpg) +![img7](figures/07.jpg) +![img8](figures/08.jpg) +![img9](figures/09.jpg) +![img10](figures/10.jpg) This particular example achieves the following performance uplift: diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/01.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/01.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/01.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/01.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/02.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/02.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/02.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/02.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/03.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/03.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/03.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/03.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/04.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/04.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/04.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/04.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/05.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/05.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/05.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/05.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/06.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/06.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/06.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/06.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/07.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/07.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/07.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/07.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/08.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/08.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/08.png rename to content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/08.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/09.png b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/09.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_sve2/Figures/09.png rename to 
content/learning-paths/mobile-graphics-and-gaming/android_sve2/figures/09.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part1.md b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part1.md index b4f07ec3d6..9a8664e0d6 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part1.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part1.md @@ -28,23 +28,23 @@ The process of creating and configuring this project is similar to that used for 1. Open Android Studio on your development machine and then click the **+ New Project** icon: -![img1](Figures/01.png) +![img1](figures/01.png) 2. Create a project using the **Native C++ Project** option in the Phone and Tablet section and click Next, as shown below: -![img2](Figures/02.png) +![img2](figures/02.png) 3. Set the application name to **ARM SVE2**, select `Kotlin` as the language, leave the Minimum SDK selection as `API 24: Android 7.0 (Nougat)`, ensure that the build configuration is set to use Kotlin DSL, and click Next, as shown below: -![img3](Figures/03.png) +![img3](figures/03.png) 4. Leave the `Toolchain Default` selection for C++ Standard and then click Finish, as shown below: -![img4](Figures/04.png) +![img4](figures/04.png) You have just created the Android application project, which comprises a `MainActivity`. The `MainActivity` contains a single TextView control, which displays the Hello from C++ string as shown in the figure below. This string is retrieved by invoking a `stringFromJNI` function from the C++ library, implemented under the `app/cpp/native-lib.cpp` file and built using the NDK and `CMakeLists.txt`. -![img5](Figures/05.png) +![img5](figures/05.png) SVE2 support is not enabled by default. To enable it, open the `CMakeLists.txt` (under app/cpp), and modify it by adding the following target_compile_options: @@ -160,7 +160,7 @@ There are two elements of the above code snippet that warrant further discussion To enable Kotlin code to access the native library, we need to create so-called JNI functions for the `runCalculations` method. To do this, place the cursor on the line where the `runCalculations` method is declared. Then, press Alt+Enter, and select **Create JNI function for runCalculations** from the menu that appears. -![img6](Figures/06.png) +![img6](figures/06.png) This action will navigate you to the native-lib.cpp file, which you will implement in the next step. diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part2.md b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part2.md index ca06dc4451..c2918b3313 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part2.md +++ b/content/learning-paths/mobile-graphics-and-gaming/android_sve2/part2.md @@ -201,13 +201,13 @@ This method generates three pseudo-random vectors of floats and a fourth vector ## Build and Run the code To test the code described above, you will need an Armv8-powered device (for this example, we used a Samsung Galaxy S22). Follow [these steps](https://developer.android.com/studio/run/device) to prepare your device for development. Once your phone is configured, it will appear in the drop-down list at the top of Android Studio. -![img7](Figures/07.png) +![img7](figures/07.png) Select your phone and then click the play icon. This action will build and deploy the application to your device. Next, enter the vector length, for example, 10,000, and click the **Run calculations** button. 
Repeat the process after selecting the "Use SVE2?" checkbox. You should observe the following results: -![img8](Figures/08.png) +![img8](figures/08.png) -![img9](Figures/09.png) +![img9](figures/09.png) As has just been demonstrated, using SVE2 intrinsics can achieve computation times that are 3-4 times shorter, depending on the vector length. diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/5-render-a-simple-3D-object-part-1.md b/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/5-render-a-simple-3d-object-part-1.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/5-render-a-simple-3D-object-part-1.md rename to content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/5-render-a-simple-3d-object-part-1.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/6-render-a-simple-3D-object-part-2.md b/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/6-render-a-simple-3d-object-part-2.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/6-render-a-simple-3D-object-part-2.md rename to content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/6-render-a-simple-3d-object-part-2.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/7-Profiling-App-using-Streamline.md b/content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/7-profiling-app-using-streamline.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/7-Profiling-App-using-Streamline.md rename to content/learning-paths/mobile-graphics-and-gaming/android_webgpu_dawn/7-profiling-app-using-streamline.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/1-dev-env-setup.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/1-dev-env-setup.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/1-dev-env-setup.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/1-dev-env-setup.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/2-executorch-setup.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/2-executorch-setup.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/2-executorch-setup.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/2-executorch-setup.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/3-Understanding-LLaMA-models.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/3-understanding-llama-models.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/3-Understanding-LLaMA-models.md rename to 
content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/3-understanding-llama-models.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/4-prepare-llama-models.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/4-Prepare-LLaMA-models.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/4-prepare-llama-models.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/5-Run-Benchmark-on-Android.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/5-run-benchmark-on-android.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/5-Run-Benchmark-on-Android.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/5-run-benchmark-on-android.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/6-Build-Android-Chat-App.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/6-build-android-chat-app.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/6-Build-Android-Chat-App.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/6-build-android-chat-app.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/_index.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/_index.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/_index.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/_index.md diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM/_next-steps.md b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/_next-steps.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/vLLM/_next-steps.md rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/_next-steps.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/device-explorer-upload.png b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/device-explorer-upload.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/device-explorer-upload.png rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/device-explorer-upload.png diff 
--git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/device-explorer.png b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/device-explorer.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/device-explorer.png rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/device-explorer.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/example-prompt-1.png b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/example-prompt-1.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/example-prompt-1.png rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/example-prompt-1.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/example-prompt-2.png b/content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/example-prompt-2.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Build-Llama3-Chat-Android-App-Using-Executorch-And-XNNPACK/example-prompt-2.png rename to content/learning-paths/mobile-graphics-and-gaming/build-llama3-chat-android-app-using-executorch-and-xnnpack/example-prompt-2.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md b/content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md index e907c3b66b..22e2830b25 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md +++ b/content/learning-paths/mobile-graphics-and-gaming/get-started-with-arm-asr/04-generic_library.md @@ -8,7 +8,7 @@ layout: learningpathall ## Introduction -Follow these steps to implement **Arm Accuracy Super Resolution (Arm ASR)** in your custom engine. +Follow these steps to implement **Arm Accuracy Super Resolution (Arm ASR)** in your custom engine. Arm ASR is an optimized variant of [Fidelity Super Resolution 2](https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/main/docs/techniques/super-resolution-temporal.md) (FSR2) that includes extensive mobile-specific optimizations, ensuring high performance on mobile devices. @@ -20,7 +20,6 @@ You can integrate Arm ASR into your custom engine using one of two methods: See the following sections to learn how to configure Arm ASR: - [Quality presets](#quality-presets). -- [Performance](#performance). - [Shader variants and extensions](#shader-variants-and-extensions). - [Input resources](#input-resources). - [Providing motion vectors](#providing-motion-vectors). @@ -45,7 +44,7 @@ See the following sections to learn how to configure Arm ASR: cd accuracy-super-resolution-generic-library ``` -2. Set a variable for the package location for easy reference. +2. Set a variable for the package location for easy reference. 
You will use this path to refer to files in the repository: @@ -161,36 +160,6 @@ FfxErrorCode ffxmFsr2GetRenderResolutionFromUpscalingRatio( FfxmFsr2UpscalingRatio upscalingRatio) ``` -## Performance -Depending on your target hardware and operating configuration, Arm ASR will operate at different performance levels. The table below compares the rendering performance of two Arm GPUs (Immortalis-G715 and Immortalis-G720) when using different upscaling settings at two target resolutions. - - -| Target resolution | Quality | Upscaling Ratio | Immortalis-G715 | Immortalis-G720 | -|-------------------|--------------------|-----------|-----------------|-----------------| -| 2800x1260 | Quality | 1.5x | 6.5 ms | 4.1 ms | -| | | 1.7x | 6.3 ms | 3.8 ms | -| | | 2x | 6.1 ms | 3.3 ms | -| | Balanced | 1.5x | 5.8 ms | 3.3 ms | -| | | 1.7x | 5.4 ms | 3.1 ms | -| | | 2x | 4.7 ms | 2.8 ms | -| | Performance | 1.5x | 5.4 ms | 3.2 ms | -| | | 1.7x | 5.3 ms | 2.9 ms | -| | | 2x | 4.6 ms | 2.5 ms | -| 2400x1080 | Quality | 1.5x | 5.3 ms | 2.9 ms | -| | | 1.7x | 4.8 ms | 2.7 ms | -| | | 2x | 4.3 ms | 2.5 ms | -| | Balanced | 1.5x | 4.2 ms | 2.5 ms | -| | | 1.7x | 4.0 ms | 2.3 ms | -| | | 2x | 3.6 ms | 2.2 ms | -| | Performance | 1.5x | 4.1 ms | 2.4 ms | -| | | 1.7x | 3.7 ms | 2.1 ms | -| | | 2x | 3.6 ms | 2 ms | - ## Shader Variants and Extensions **Unless you are using the prebuilt shaders with the standalone VK backend**, be aware of the following definitions when integrating Arm ASR shaders: diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/02-ML-in-games.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/02-ml-in-games.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/02-ML-in-games.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/02-ml-in-games.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/03-Install.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/03-install.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/03-Install.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/03-install.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/04-The-Dr-Arm-Game.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/04-the-dr-arm-game.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/04-The-Dr-Arm-Game.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/04-the-dr-arm-game.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/05-Machine-learning-in-Unity.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/05-machine-learning-in-unity.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/05-Machine-learning-in-Unity.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/05-machine-learning-in-unity.md diff --git 
a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/06-The-Unity-project.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/06-the-unity-project.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/06-The-Unity-project.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/06-the-unity-project.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/07-Objects-and-Components.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/07-objects-and-components.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/07-Objects-and-Components.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/07-objects-and-components.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/08-Training-our-ML-agents.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/08-training-our-ml-agents.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/08-Training-our-ML-agents.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/08-training-our-ml-agents.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/09-Optimizations.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/09-optimizations.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/09-Optimizations.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/09-optimizations.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/10-Summary.md b/content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/10-summary.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/10-Summary.md rename to content/learning-paths/mobile-graphics-and-gaming/using_unity_machine_learning_agents/10-summary.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/1-devenv-and-model.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/1-devenv-and-model.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/1-devenv-and-model.md rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/1-devenv-and-model.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/2-generate-apk.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/2-generate-apk.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/2-generate-apk.md rename to 
content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/2-generate-apk.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/3-benchmark.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/3-benchmark.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/3-benchmark.md rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/3-benchmark.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/Loading_page.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/Loading_page.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/Loading_page.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/Loading_page.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/_index.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/_index.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/_index.md rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/_index.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/_next-steps.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. 
+--- diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/background.md b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/background.md similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/background.md rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/background.md diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/chat2.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/chat2.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/chat2.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/chat2.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/chat_page.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/chat_page.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/chat_page.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/chat_page.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/example-picture.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/example-picture.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/example-picture.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/example-picture.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/example.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/example.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/example.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/example.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/install_ndk.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/install_ndk.png similarity index 100% rename from content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/install_ndk.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/install_ndk.png diff --git a/content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/open_project.png b/content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/open_project.png similarity index 100% rename from 
content/learning-paths/mobile-graphics-and-gaming/Vision-LLM-inference-on-Android-with-KleidiAI-and-MNN/open_project.png rename to content/learning-paths/mobile-graphics-and-gaming/vision-llm-inference-on-android-with-kleidiai-and-mnn/open_project.png diff --git a/content/learning-paths/servers-and-cloud-computing/_index.md b/content/learning-paths/servers-and-cloud-computing/_index.md index 8dbd81bfc3..bea38eabf4 100644 --- a/content/learning-paths/servers-and-cloud-computing/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/_index.md @@ -8,7 +8,7 @@ key_ip: maintopic: true operatingsystems_filter: - Android: 2 -- Linux: 135 +- Linux: 138 - macOS: 10 - Windows: 14 pinned_modules: @@ -22,8 +22,8 @@ subjects_filter: - Containers and Virtualization: 27 - Databases: 15 - Libraries: 8 -- ML: 24 -- Performance and Architecture: 49 +- ML: 25 +- Performance and Architecture: 51 - Storage: 1 - Web: 10 subtitle: Optimize cloud native apps on Arm for performance and cost @@ -52,6 +52,7 @@ tools_software_languages_filter: - AWS Elastic Kubernetes Service (EKS): 3 - AWS Graviton: 1 - Bash: 1 +- bash: 1 - Bastion: 3 - BOLT: 1 - bpftool: 1 @@ -80,10 +81,10 @@ tools_software_languages_filter: - Fortran: 1 - FunASR: 1 - FVP: 4 -- GCC: 20 +- GCC: 21 - gdb: 1 - Geekbench: 1 -- GenAI: 10 +- GenAI: 11 - GitHub: 6 - GitLab: 1 - Glibc: 1 @@ -104,7 +105,7 @@ tools_software_languages_filter: - libbpf: 1 - Linaro Forge: 1 - Litmus7: 1 -- LLM: 8 +- LLM: 9 - llvm-mca: 1 - LSE: 1 - MariaDB: 1 @@ -123,13 +124,13 @@ tools_software_languages_filter: - PAPI: 1 - perf: 4 - PostgreSQL: 4 -- Python: 25 +- Python: 26 - PyTorch: 8 - RAG: 1 - Redis: 3 - Remote.It: 2 - RME: 4 -- Runbook: 63 +- Runbook: 65 - Rust: 2 - snappy: 1 - Snort3: 1 @@ -150,7 +151,7 @@ tools_software_languages_filter: - Vectorscan: 1 - Veraison: 1 - Visual Studio Code: 4 -- vLLM: 1 +- vLLM: 2 - VS Code: 1 - vvenc: 1 - Whisper: 1 diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/_index.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/_index.md index 09f29aed1f..61e81630f2 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/_index.md @@ -1,24 +1,17 @@ --- -title: Build a CCA Attestation Service in AWS with Veraison - -draft: true -cascade: - draft: true +title: Build a CCA Attestation Service on AWS with Veraison minutes_to_complete: 90 -who_is_this_for: This Learning Path is for developers who understand the basics of CCA attestation and the Veraison project, and who wish to progress onto creating a more scalable deployment of a CCA attestation verifier service in the cloud. - +who_is_this_for: This Learning Path is for developers familiar with CCA attestation and the Veraison project. You'll learn how to deploy a scalable CCA attestation verifier service on AWS. learning_objectives: - - Create an attestation service in the AWS cloud using components from the Veraison project. - - Prepare the Veraison service to act as a verifier for Arm CCA attestation tokens by provisioning CCA platform endorsements. - + - Build an attestation service on AWS using the Veraison project's components. + - Set up Veraison as a verifier for Arm CCA attestation tokens by provisioning CCA platform endorsements. prerequisites: - - An [AWS account](/learning-paths/servers-and-cloud-computing/csp/aws/) for accessing AWS cloud services. 
- - An x86 computer running Ubuntu or Arch Linux, which is authorized to use the AWS account. Other build environments might be possible, but will require the configuration of toolchains for cross-compilation. - + - An [AWS account](/learning-paths/servers-and-cloud-computing/csp/aws/) with access to AWS services. + - An x86 computer running Ubuntu or Arch Linux, authorized for AWS access. If you're using another build environment, you'll need to configure the toolchains for cross-compilation. author: Paul Howard @@ -26,6 +19,7 @@ author: Paul Howard skilllevels: Advanced subjects: Performance and Architecture armips: + - Neoverse - Cortex-A operatingsystems: - Linux @@ -33,9 +27,6 @@ tools_software_languages: - CCA - Runbook - - - further_reading: - resource: title: RATS architecture (RFC 9334) @@ -50,8 +41,6 @@ further_reading: link: https://datatracker.ietf.org/doc/draft-ietf-rats-ar4si/ type: documentation - - ### FIXED, DO NOT MODIFY # ================================================================================ weight: 1 # _index.md always has weight of 1 to order correctly diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/aws-account-prep.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/aws-account-prep.md index 15aa0786c8..5ae2079cbf 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/aws-account-prep.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/aws-account-prep.md @@ -6,20 +6,21 @@ weight: 3 layout: learningpathall --- -## Prepare Your AWS Account -For this learning path, you will need an active AWS account. If you do not have an AWS account, please refer to the [AWS documentation](https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-creating.html). +## Prepare your AWS account +You’ll need an active AWS account for this Learning Path. If you don't have one yet, refer to the [AWS documentation](https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-creating.html). -This learning path assumes that you have administrator level privileges for your AWS account. +{{% notice Note %}} +This Learning Path assumes that you have administrator-level privileges for your AWS account. {{% /notice %}} -## Install the AWS Command-Line Tools -For this section, you will need the AWS Command-Line (CLI) tools. Please refer to the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) for the steps needed to install the latest version of the AWS CLI. +## Install AWS command-line tools +You’ll need the AWS Command-Line Interface (CLI) installed for this section. Follow the [AWS documentation](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) to install the latest version. -## Set Up Authentication -You will need to configure your local environment to authenticate with the AWS cloud in order to build the Veraison deployment. +## Set up authentication +You'll need to set up your local environment to authenticate with AWS before deploying Veraison. -The recommended way to do this is using Single Sign-On (SSO). The steps to do this are documented in Veraison's documentation [here](https://github.com/veraison/services/tree/main/deployments/aws#aws-account). +The recommended method is Single Sign-On (SSO). Follow the steps in Veraison's documentation [here](https://github.com/veraison/services/tree/main/deployments/aws#aws-account). 
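The exact SSO setup depends on how your organization has configured AWS IAM Identity Center, so treat the commands below as a rough sketch rather than the definitive procedure; the profile name is only a placeholder, and the Veraison documentation linked above remains the authoritative reference.

```bash
# Create an SSO profile interactively (the profile name is an illustrative placeholder).
aws configure sso --profile veraison-deploy

# Sign in through the browser window that opens, then use this profile for the
# AWS CLI and deployment commands that follow in this shell.
aws sso login --profile veraison-deploy
export AWS_PROFILE=veraison-deploy
```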
-It is important to ensure that authentication is configured correctly. The best way to do this is to run a simple command-line operation such as the following: +To confirm authentication is configured correctly, run a simple command, such as: ```bash aws ec2 describe-availability-zones @@ -66,4 +67,4 @@ You should see output similar to the following (depending on which AWS region yo ] } ``` -If this operation fails, please do not attempt to proceed with the next steps of this learning path. Refer to [AWS documentation](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-authentication.html) for help with troubleshooting this step. +If this operation fails, pause here and troubleshoot using the [AWS documentation](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-authentication.html) before continuing. diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/deployment.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/deployment.md index b8fe73a878..7a6df84242 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/deployment.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/deployment.md @@ -9,7 +9,7 @@ layout: learningpathall --- ## Create the Veraison Deployment Now that your AWS account, internet domain and certificate are prepared, you are ready to deploy the Veraison services into AWS. -This process is highly automated, but it takes some time, because a number of resources need to be created in AWS. Be prepared for this step to take from 30 to 60 minutes, although there won't be too much for you to do during this time. You will just run a command to kick off the process. +This process is highly automated, but will take between 30 and 60 minutes, as several resources need to be created in AWS. The deployment process is documented in [Veraison's GitHub repository](https://github.com/veraison/services/blob/main/deployments/aws/README.md). @@ -25,7 +25,7 @@ make bootstrap ``` Once your build environment is bootstrapped, you will use the [Quickstart](https://github.com/veraison/services/tree/main/deployments/aws#quickstart) procedure to provide some AWS configuration and create the deployment. -You need to provide your AWS account-specific configuration that specifies the IDs of the VPC and subnets that will be used for the deployment as well as the CIDR that will be granted access to the deployment. In this deployment you will use `misc/arm.cfg` file for example. Make sure you update `VERAISON_AWS_REGION` to the same region where you created your AWS Certificate for your new domain.`VERAISON_AWS_DNS_NAME` will need to match the domain name you chose. +You'll provide AWS-specific settings, including the IDs for your VPC and subnets, and the CIDR block allowed access to the deployment. In this deployment, you will use the `misc/arm.cfg` file as an example. Make sure you update `VERAISON_AWS_REGION` to the same region where you created your AWS certificate for your new domain. `VERAISON_AWS_DNS_NAME` needs to match the domain name you chose. Once the account-specific config file is created, define `AWS_ACCOUNT_CFG` environment variable to point to it and then create the deployment. @@ -35,7 +35,7 @@ export AWS_ACCOUNT_CFG=misc/arm.cfg # replace with path to your config make deploy ``` -You do not need to use the end-to-end flow as described in the document. Later in this learning path, you will perform some additional steps to prepare and use the Veraison services. 
+You do not need to use the end-to-end flow as described in the document. Later in this Learning Path, you will perform some additional steps to prepare and use the Veraison services. The rest of the document provides additional information about how to manage the deployment, but you don't need this now. @@ -45,15 +45,15 @@ In the command shell where you ran the steps above, run the following command: ```bash veraison status ``` -This command will output a status report for the deployment. If successful, it will include information about:- +This command outputs a status report for the deployment. If successful, it includes information about: - The Amazon Machine Images (AMIs) that have been used for the servers. - The status of the VPC stack, support stack and services stack. All of these should read as `created`. - Information about RDS, ElastiCache and EC2 resources in the deployment. - The version of the Veraison software that is running. - The public part of the key that is used to sign attestation results (known as the EAR Verification Key). -- A list of media types that Veraison will accept as attestation evidence. -- A list of media types that Veraison will accept as endorsements. +- A list of media types that Veraison accepts as attestation evidence. +- A list of media types that Veraison accepts as endorsements. Use the following command to test the REST API endpoint of the verification service. Remember to substitute `example-veraison.com` with the domain name that you used in the initial step, but you will need to keep the `services` prefix as shown. @@ -69,6 +69,6 @@ Use the following command to test the REST API endpoint of the endorsement provi curl https://services.example-veraison.com:9443/.well-known/veraison/provisioning ``` -This command will produce JSON output containing the list of supported media types for endorsement. +This command produces JSON output containing the list of supported media types for endorsement. -Your Veraison services are now deployed and working, and you can proceed to the next step. +Your Veraison services are now successfully deployed - you're ready for the next step. \ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/domain-and-cert.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/domain-and-cert.md index ed1258d5eb..37cf7ee988 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/domain-and-cert.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/domain-and-cert.md @@ -7,51 +7,72 @@ layout: learningpathall --- ## Create Your Domain in Route53 -Veraison provides cloud services for attestation. These services are published on the internet and are accessible via HTTPS using RESTful APIs. Like all cloud services, they require a suitable internet domain that allows the consumers of those services to locate them. Domains are named using string labels separated by dots (full stops). You will be familiar with domain names such as `www.amazon.com` - they allow public internet resources to be located conveniently and routed using shared internet infrastructure such as DNS. +Veraison provides cloud services for attestation. These services are published on the internet and are accessible via HTTPS using RESTful APIs. Like all cloud services, they need a domain so users can easily find and access them. Domains are named using string labels separated by dots. 
You will be familiar with domain names such as `www.amazon.com` - they allow public internet resources to be located conveniently and routed using shared internet infrastructure such as DNS. +### What is Route53? -[Route53](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/Welcome.html) is an AWS service that allows you to register and manage domains. In order to create your Veraison deployment in AWS, you will first need to choose a domain name and register it with Route53. +[Route53](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/Welcome.html) is an AWS service that allows you to register and manage domains. In order to create your Veraison deployment in AWS, you first need to choose a domain name and register it with Route53. -Your domain name _must_ be unique and specific to your Veraison deployment. Remember that this domain name will be used to create public internet services, so it cannot clash with any other domains that exist on the public internet. In this learning path, we will use `example-veraison.com` as an illustrative example of a domain name. However, _do not_ use this name for your own deployment. Choose your own name, and use it in place of the example as you progress through the learning path. +### Choosing Your Domain Name -The easiest way to create your domain is using the Route53 dashboard in the AWS Management Console. Using your web browser and AWS account credentials, sign into the console and search for the Route53 service. This will take you to the Route53 dashboard. Locate the domain registration option, which will look something like the image below. Remember to use your own domain name where highlighted, not the `example-veraison.com` domain. +Your domain name must be unique and specific to your Veraison deployment. Remember that this domain name is for creating public internet services, so it cannot clash with any other domains that exist on the public internet. -**Note:** If you have an existing domain and you wish to transfer it into Route53, instead of creating a new domain, you can use this option as well as shown below. Otherwise, the instructions here assume that you are creating a new domain. +In this Learning Path, you will use `example-veraison.com` as an illustrative example of a domain name. However, do not use this name for your own deployment. Choose your own name, and use it in place of the example as you progress through the Learning Path. + +### Registering Your Domain with Route53 + +The easiest way to create your domain is using the Route53 dashboard in the AWS Management Console: + +* Using your web browser and AWS account credentials, sign into the console and search for the Route53 service. This takes you to the Route53 dashboard. +* Locate the domain registration option, which looks something like the image below. Remember to use your own domain name where highlighted, not the `example-veraison.com` domain. + +{{% notice Note %}} +If you have an existing domain, you can transfer it into Route53 instead of registering a new one, as shown below. Otherwise, the instructions here assume that you are creating a new domain. +{{% /notice %}} ![Register Domain](./create-domain.png) -Route53 will check the domain name for you to ensure that it doesn't clash with others on the internet. Provided that there isn't a clash, Route53 will give you the option of registering your chosen domain name, or some alternatives. For example, it might suggest you could use `example-veraison.net` or `example-veraison.org`. 
+Route53 checks your domain name to make sure it’s unique on the internet. Provided that there isn't a clash, Route53 gives you the option of registering your chosen domain name, or some alternatives. For example, it might suggest you could use `example-veraison.net` or `example-veraison.org`. -Route53 will charge an annual fee for the domain registration, and the size of this fee can differ depending on your name choice. Choose the name that you would like to use, and proceed to complete the registration process using the on-screen instructions in your browser. +Route53 charges an annual fee for domain registration, which varies depending on your chosen name. Choose the name that you would like to use, and proceed to complete the registration process using the on-screen instructions in your browser. ## Request Your Domain Certificate -Veraison publishes secure cloud services that can be reached on the internet using HTTPs. This means that you will need a certificate for your registered domain. A certificate will allow consumers of your Veraison services to trust those services and connect with them securely. -The [AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/) can be used to issue a certificate for your domain. Navigate to this service within the AWS Management Console in your web browser. +### Why Do I Need a Domain Certificate? +Veraison publishes secure cloud services that can be reached on the internet using HTTPS. You need a domain certificate so users can securely and confidently connect to your Veraison services. -Select "Request Certificate" from the menu on the left as shown. +### Requesting a Certificate with AWS Certificate Manager +You can use the [AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/) to issue a certificate for your domain: + +* Navigate to this service within the AWS Management Console in your web browser. + +* Select **Request Certificate** from the menu on the left as shown. ![Request Certificate](./request-certificate.png) -On the first page of the certificate wizard, select the option to request a **public certificate** as shown below and click **Next**. +On the first page of the certificate wizard, select **Request a public certificate**, then click **Next**. ![Request Public Certificate](./request-public-certificate.png) -The next page of the wizard will be used to set other properties of the certificate, starting with the domain name. The primary domain name of the certificate must precisely match the domain name that you registered with Route53 in the previous step. However, the Veraison deployment will also create a number of named services nested within that primary domain. In order for the certificate to adequately cover all of those named services, you need to give the certificate an additional name, which uses an asterisk (*) wildcard as shown below. +The next page of the wizard is for setting other properties of the certificate, starting with the domain name. The primary domain name of the certificate must precisely match the domain name that you registered with Route53 in the previous step. However, the Veraison deployment also creates a number of named services nested within that primary domain. + +In order for the certificate to adequately cover all of those named services, you need to give the certificate an additional name, which uses an asterisk (*) wildcard as shown below. 
Remember, once again, substitute your chosen domain name to use in place of `example-veraison.com`. Use the **Add another name to this certificate** button to create the secondary name with the wildcard. ![Set Certificate Names](./set-cert-names.png) -For the validation method, you should use **DNS validation**, which will be the recommended default. You can also use the default **RSA 2048** for the certificate key algorithm as shown below. +For the validation method, you should use **DNS validation**, which is the recommended default. You can also use the default **RSA 2048** for the certificate key algorithm as shown below. ![Certificate Validation and Key Algorithm](./cert-validation-and-algorithm.png) Now click **Request** to request the certificate from the Certificate Manager. -Before AWS can issue the certificate, it will need to check that you own the domain. Since you have registered the domain in Route53 in the previous step, this will be straightforward. +### Validating Your Domain Ownership + +Before AWS can issue the certificate, it checks that you own the domain. Since you registered the domain in Route53 earlier, this is straightforward. Use the ACM dashboard to view the certificate. You will see that it has a status of "Pending Validation". You will also see the two associated domains: `example-veraison.com` and `*.example-veraison.com`, or whatever your chosen domain name is. -Click the button that says **Create records in Route 53**. This will allow AWS to prove that you own the domain, at which point it will issue the certificate, and the status will change from "Pending Validation" to "Issued". Be aware that this process can take up to about half an hour. +Click **Create records in Route 53** to confirm domain ownership. AWS then issues the certificate, and its status changes from **Pending Validation** to **Issued**. Be aware that this process can take up to about half an hour. Once your domain and certificate are prepared, you are ready to create your Veraison deployment. diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/endorsements.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/endorsements.md index 26e6cb3f46..29adb6c969 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/endorsements.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/endorsements.md @@ -17,7 +17,7 @@ cd $HOME git clone https://git.codelinaro.org/linaro/dcap/cca-demos/poc-endorser ``` ## Configure the Endorsement Tool for AWS -By default, the endorsement tool assumes that your Veraison services are deployed locally on your machine. This is not the case here, because your Veraison services have been deployed into AWS instead. Therefore, you will need to provide some configuration to the tool, in order to point it at the correct API endpoints with the required authentication. +By default, the endorsement tool assumes that your Veraison services are deployed locally on your machine. This is not the case here, because your Veraison services have been deployed into AWS instead. You'll need to configure the tool to point it at the correct API endpoints with the required authentication. In the command shell where you created the AWS deployment of Veraison, run the following command: @@ -69,7 +69,7 @@ Now run the following command to provision the endorsements: ```bash make endorse ``` -This command will run the Docker container and send the CCA endorsements to Veraison. 
You should see output similar to the following: +This command runs the Docker container and sends the CCA endorsements to your AWS-hosted Veraison deployment. You should see output similar to the following: ```output docker run --network=host "cca-demo/endorser" @@ -88,7 +88,7 @@ Next, return to the command shell where you created the Veraison AWS deployment, cd $HOME/services/deployments/aws veraison stores ``` -This command will query Veraison's database stores. If the CCA endorsements were provisioned successfully, the output should look something like the example below. (You don't need to be concerned with understanding all of the detail here.) +This command will query Veraison's database stores. If the CCA endorsements were provisioned successfully, the output should look something like the example below. (You don't need to understand all of the detail here.) ```output TRUST ANCHORS: @@ -304,4 +304,4 @@ ARM_CCA://0/f0VMRgIBAQAAAAAAAAAAAAMAPgABAAAAUFgAAAAAAAA= POLICIES: --------- ``` -Your Veraison deployment is now complete and ready to act as an attestation verification service for pre-silicon Arm CCA platforms. +Your Veraison deployment is now ready to act as an attestation verification service for pre-silicon Arm CCA platforms. \ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/overview.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/overview.md index 0f21604dee..4a60120e68 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/overview.md @@ -1,18 +1,24 @@ --- -title: "Overview: Deploying Veraison in AWS" +title: "Overview" weight: 2 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Overview -[Veraison](https://github.com/veraison) is a community open-source project that is part of the [Confidential Computing Consortium (CCC)](https://confidentialcomputing.io). Veraison provides the components that are needed to build attestation verification services for confidential computing or other use cases. Veraison acts as the Verifier role in the [RATS architecture (RFC9334)](https://datatracker.ietf.org/doc/rfc9334/), which is a common model for attestation-based systems. Veraison makes use of community standardization efforts to ensure a high degree of interoperability. +## Deploying Veraison on AWS +[Veraison](https://github.com/veraison) is a community open-source project that is part of the [Confidential Computing Consortium (CCC)](https://confidentialcomputing.io). Veraison provides components for building attestation verification services for confidential computing and other use cases. -Attestation is essential for confidential computing, and Veraison can be used as the verifier service for Arm's Confidential Compute Architecture (CCA). If you have not already familiarized yourself with CCA attestation and Veraison, it is recommended that you first follow the learning paths [Get Started with CCA Attestation and Veraison](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-veraison/) and [Run an end-to-end Attestation Flow with Arm CCA](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-essentials/). +Veraison acts as the Verifier role in the [RATS architecture (RFC9334)](https://datatracker.ietf.org/doc/rfc9334/), which is a common model for attestation-based systems. 
Veraison makes use of community standardization efforts to ensure a high degree of interoperability. -Attestation is essential for confidential computing, and Veraison can be used as the verifier service for Arm's Confidential Compute Architecture (CCA). If you have not already familiarized yourself with CCA attestation and Veraison, it is recommended that you first follow the learning paths [Get Started with CCA Attestation and Veraison](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-veraison/) and [Run an end-to-end Attestation Flow with Arm CCA](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-essentials/). +Attestation is essential for confidential computing, and Veraison acts as a verifier for Arm's Confidential Compute Architecture (CCA). -The two learning paths linked above make use of a Veraison verification service that is published and maintained by [Linaro](https://www.linaro.org). +{{% notice Learning Tip %}} +If you're new to CCA attestation and Veraison, you will benefit from first completing the Learning Paths [Get Started with CCA Attestation and Veraison](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-veraison/) and [Run an end-to-end Attestation Flow with Arm CCA](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-essentials/). These two Learning Paths use a Veraison verification service hosted by [Linaro](https://www.linaro.org). +{{% /notice %}} -In this learning path, you will create and publish your own Veraison verification service in the AWS cloud. After you complete the learning path, you will be able to go back through the steps of the previous two learning paths, and use your own AWS-hosted Veraison service instead of the one hosted by Linaro. -AWS is not the only way to deploy Veraison, but we will adopt it here as an example of using public cloud infrastructure. You can read about other types of deployment in the [Veraison project README](https://github.com/veraison/services?tab=readme-ov-file#services). +In this Learning Path, you'll create and deploy your own Veraison verification service on AWS. After completing this Learning Path, you'll be able to revisit the two Learning Paths mentioned above, using your own AWS-hosted Veraison service instead of the one hosted by Linaro. + +{{% notice Note %}} +AWS isn't the only deployment option for Veraison, but you'll use it here as an example of deploying on public cloud infrastructure. For other deployment methods, see the [Veraison project README](https://github.com/veraison/services?tab=readme-ov-file#services). +{{% /notice %}} \ No newline at end of file diff --git a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/use-verifier.md b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/use-verifier.md index 9778fb5952..212f44f0a5 100644 --- a/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/use-verifier.md +++ b/content/learning-paths/servers-and-cloud-computing/cca-veraison-aws/use-verifier.md @@ -9,10 +9,10 @@ layout: learningpathall --- ## Use Your AWS Deployment to Verify a CCA Attestation Token Now that your Veraison services are deployed into AWS and initialized with endorsements for the CCA reference platform, you are ready to make use of the verification service to verify a CCA attestation token. -To do this, you should follow the steps set out in the learning path [Get Started with CCA Attestation and Veraison](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-veraison/). However, you should follow this learning path in such a way that it uses your AWS deployment of Veraison, instead of the service provided by Linaro. +To do this, you should follow the steps set out in the Learning Path [Get Started with CCA Attestation and Veraison](https://learn.arm.com/learning-paths/servers-and-cloud-computing/cca-veraison/). Follow the steps in this Learning Path exactly, except you'll use your AWS-hosted Veraison deployment instead of Linaro's service. 
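Before working through those steps again, it can help to confirm that your own verification service is reachable. The command below is a sketch that assumes the same well-known discovery endpoint pattern used for the deployment checks earlier in this Learning Path; substitute your registered domain for `example-veraison.com`.

```bash
# Sanity check that the verification service answers on port 8443.
# The endpoint path is assumed to mirror the provisioning check shown earlier;
# replace example-veraison.com with your own registered domain.
curl https://services.example-veraison.com:8443/.well-known/veraison/verification
```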
The URL for the Veraison server provided by Linaro is `https://veraison.test.linaro.org:8443`. -Instead if using this URL, you should use the URL for your Veraison service, which will be of the form ` https://services.example-veraison.com:8443`, although you will need to replace `example-veraison.com` with your own registered AWS domain. +Instead of this URL, use the one for your own Veraison service, which will be of the form `https://services.example-veraison.com:8443`, although you need to replace `example-veraison.com` with your AWS domain. -Apart from this URL change, all other steps in the learning path remain the same. +Apart from this URL change, all other steps in the Learning Path remain the same. diff --git a/content/learning-paths/servers-and-cloud-computing/clickhouse/_index.md b/content/learning-paths/servers-and-cloud-computing/clickhouse/_index.md index f05659cfce..de40abbf25 100644 --- a/content/learning-paths/servers-and-cloud-computing/clickhouse/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/clickhouse/_index.md @@ -20,8 +20,8 @@ subjects: Databases armips: - Neoverse tools_software_languages: - - ClickHouse - - ClickBench + - ClickHouse + - ClickBench operatingsystems: - Linux @@ -29,8 +29,6 @@ test_images: - ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed further_reading: - resource: diff --git a/content/learning-paths/servers-and-cloud-computing/clickhouse/clickhouse.md b/content/learning-paths/servers-and-cloud-computing/clickhouse/clickhouse.md index 62b3a3460c..5d5459d1eb 100644 --- a/content/learning-paths/servers-and-cloud-computing/clickhouse/clickhouse.md +++ b/content/learning-paths/servers-and-cloud-computing/clickhouse/clickhouse.md @@ -12,7 +12,7 @@ ClickBench is open-source software used to evaluate the performance of various d ## Before you begin -You will need an Arm server or an [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider running a recent version of Ubuntu for Arm. +You will need an Arm server or an [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider running a recent version of Ubuntu for Arm. You will also need sufficient storage on the instance for the web-analytics dataset used for measuring ClickHouse performance, 500 GB is recommended. @@ -28,7 +28,7 @@ curl https://clickhouse.com/ | sh sudo DEBIAN_FRONTEND=noninteractive ./clickhouse install ``` -2. Set the compression method for `clickhouse-server` to use `zstd` by running the commands: +2. Set the compression method for `clickhouse-server` to use `zstd` by running the commands: ```bash echo " @@ -44,7 +44,7 @@ compression: sudo clickhouse start ``` -## Run ClickBench +## Run ClickBench 1. Clone the ClickBench repository: @@ -60,9 +60,9 @@ cd ClickBench/clickhouse clickhouse-client < create.sql ``` -3. Load the benchmark data +3. Load the benchmark data -The data file is very large and takes more than 10 minutes to download and uncompress. +The data file is very large and takes more than 10 minutes to download and uncompress. ```console wget --continue 'https://datasets.clickhouse.com/hits_compatible/hits.tsv.gz' @@ -71,7 +71,7 @@ gzip -d hits.tsv.gz 4. Import the data using `clickhouse-client`: -Importing the data takes more than 5 minutes. +Importing the data takes more than 5 minutes. 
```console clickhouse-client --time --query "INSERT INTO hits FORMAT TSV" < hits.tsv @@ -79,17 +79,17 @@ clickhouse-client --time --query "INSERT INTO hits FORMAT TSV" < hits.tsv The data used is the [Anonymized Web Analytics dataset](https://clickhouse.com/docs/en/getting-started/example-datasets/metrica/). -5. Execute the benchmark +5. Execute the benchmark The script loops through each query three times. A total of 43 queries are run. -```bash +```console ./run.sh ``` ### ClickBench results -When you execute the `run.sh` script, the query processing time for each individual query is displayed on the console. +When you execute the `run.sh` script, the query processing time for each individual query is displayed on the console. The three comma separated values represent the query latency time for each of the three times the query is run. @@ -101,6 +101,6 @@ The three comma separated values represent the query latency time for each of th The summarized results are also saved to the `results.csv` file in the current directory. The `results.csv` file has 129 lines (43 queries each run 3 times). -You can try different types of hardware and compare results. For example, if you use AWS try c6g.2xlarge and compare to c7g.2xlarge to see the difference between AWS Graviton2 and Graviton3 processors. +You can try different types of hardware and compare results. For example, if you use AWS try c6g.2xlarge and compare to c7g.2xlarge to see the difference between AWS Graviton2 and Graviton3 processors. diff --git a/content/learning-paths/servers-and-cloud-computing/codec/_index.md b/content/learning-paths/servers-and-cloud-computing/codec/_index.md index 6c335b2b83..7511d082a6 100644 --- a/content/learning-paths/servers-and-cloud-computing/codec/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/codec/_index.md @@ -1,30 +1,38 @@ --- -armips: -- Neoverse -author: Pareena Verma -layout: learningpathall +title: Run x265 (H.265 codec) on Arm servers + +minutes_to_complete: 10 + +who_is_this_for: This is an introductory topic for software developers who want to + build and run an x265 codec on Arm servers and measure performance. + + learning_objectives: - Build x265 codec on Arm server - Run x265 codec on Arm server with the same video of various resolutions and encoding presets to measure the performance impact -learning_path_main_page: 'yes' -minutes_to_complete: 10 -operatingsystems: -- Linux + prerequisites: - An [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from an appropriate cloud service provider. This Learning Path has been verified on AWS EC2 and Oracle cloud services, running `Ubuntu Linux 20.04.` -skilllevels: Introductory -subjects: Libraries + +author: Pareena Verma + test_images: - ubuntu:latest test_link: null test_maintenance: true -test_status: -- passed -title: Run x265 (H.265 codec) on Arm servers + +### Tags +skilllevels: Introductory +subjects: Libraries +armips: +- Neoverse tools_software_languages: - x265 +operatingsystems: +- Linux + further_reading: - resource: title: x265 Documentation @@ -43,8 +51,10 @@ further_reading: link: https://community.arm.com/arm-community-blogs/b/operating-systems-blog/posts/oracle-cloud-infrastructure-arm-based-a1 type: blog - -weight: 1 -who_is_this_for: This is an introductory topic for software developers who want to - build and run an x265 codec on Arm servers and measure performance. 
+### FIXED, DO NOT MODIFY
+# ================================================================================
+weight: 1 # _index.md always has weight of 1 to order correctly
+layout: "learningpathall" # All files under learning paths have this same wrapper
+learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content.
 ---
diff --git a/content/learning-paths/servers-and-cloud-computing/codec/x265.md b/content/learning-paths/servers-and-cloud-computing/codec/x265.md
index 602d6713e8..fa53cf3ab5 100644
--- a/content/learning-paths/servers-and-cloud-computing/codec/x265.md
+++ b/content/learning-paths/servers-and-cloud-computing/codec/x265.md
@@ -8,25 +8,42 @@ weight: 2

`x265` is an open-source H.265/HVEC encoder that offers very high compression efficiency and performance. There have been significant efforts to optimize the open-source libx265 implementation of the H.265 encoder on Arm Neoverse platforms which supports Neon instructions. The optimized code is available on [Bitbucket](https://bitbucket.org/multicoreware/x265_git/)

-Install GCC for your Arm Linux distribution. Refer to the [install guide](/install-guides/gcc/native/) for additional information.
+Install GCC for your Arm Linux distribution. Refer to the [install guide](/install-guides/gcc/native/) for additional information.

Install `Cmake` and other dependencies:

```bash
-sudo apt install git wget cmake cmake-curses-gui -y
+sudo apt update
+sudo apt install wget git cmake cmake-curses-gui build-essential -y
```

-Install `Build-essential` and `yasm`
-```bash
-sudo apt install build-essential yasm -y
+If you are running on x86, also install `yasm`
+```console
+sudo apt install yasm -y
```

## Download and build x265 source

+Clone the repository and navigate to the `linux` directory.
+
```bash
git clone https://bitbucket.org/multicoreware/x265_git.git
cd x265_git/build/linux
+```
+
+Run the interactive bash script to change the default flags for the build. Then, run `make`.
+
+```console
./make-Makefiles.bash
make
```
+
+{{% notice Note %}}
+If you encounter an error regarding an unknown value, you can change the `ENABLE_NEON_I8MM` flag to `OFF` using the interactive bash script in the previous step.
+
+```output
+error: unknown value ‘armv9-a+i8mm+sve2’ for ‘-march’
+```
+{{% /notice %}}
+
For detailed instructions refer to this [README](https://bitbucket.org/multicoreware/x265_git/src/master/build/README.txt).

## Download video streams to run x265 on and measure performance
@@ -53,7 +70,7 @@ To benchmark the performance of `x265`, using the `1080P` video file, run the co

You can vary the preset settings on the different resolution images and measure the impact on performance. For full usage instructions, refer to the [command line documentation](https://x265.readthedocs.io/en/master/cli.html), or see the help:

-```bash { cwd="x265_git/build/linux",ret_code="1" }
+```console
./x265 --help
```

diff --git a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/_index.md b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/_index.md
index 89924f5aef..714e5c72e1 100644
--- a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/_index.md
+++ b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/_index.md
@@ -7,15 +7,15 @@ cascade:

minutes_to_complete: 30

-who_is_this_for: This is an introductory topic for developers interested in running DeepSeek-R1 on Arm-based servers.
+who_is_this_for: This Learning Path is for developers who want to run DeepSeek-R1 on Arm-based servers. learning_objectives: - - Download and build llama.cpp on your Arm server. + - Clone and build llama.cpp on your Arm-based server. - Download a pre-quantized DeepSeek-R1 model from Hugging Face. - - Run the pre-quantized model on your Arm CPU and measure the performance. + - Run the model on your Arm CPU and benchmark its performance. prerequisites: - - An [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider or an on-premise Arm server. This Learning Path was tested on an AWS Graviton4 r8g.24xlarge instance. + - An [Arm-based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud provider or an on-premise Arm server. This Learning Path was tested on an AWS Graviton4 r8g.24xlarge instance. author: - Tianyu Li diff --git a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-chatbot.md b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-chatbot.md index 7ba5604cc8..f365cae002 100644 --- a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-chatbot.md +++ b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-chatbot.md @@ -6,17 +6,21 @@ weight: 3 layout: learningpathall --- -## Before you begin +## Background and what you'll build + The instructions in this Learning Path are for any Arm server running Ubuntu 24.04 LTS. You need an Arm server instance with at least 64 cores and 512GB of RAM to run this example. Configure disk storage up to at least 400 GB. The instructions have been tested on an AWS Graviton4 r8g.24xlarge instance. +Arm CPUs are widely used in ML and AI use cases. In this Learning Path, you will learn how to run a generative AI inference-based use case of a LLM chatbot on Arm-based CPUs by deploying the [DeepSeek-R1 671B LLM](https://huggingface.co/bartowski/DeepSeek-R1-GGUF) on your Arm-based CPU using `llama.cpp`, optimized for Arm hardware. You'll: + +- Build and run `llama.cpp` with Arm-specific performance improvements. +- Download a quantized GGUF model from Hugging Face. +- Run and measure performance on a large Arm instance (e.g., AWS Graviton4). -## Overview -Arm CPUs are widely used in traditional ML and AI use cases. In this Learning Path, you learn how to run generative AI inference-based use cases like a LLM chatbot on Arm-based CPUs. You do this by deploying the [DeepSeek-R1 GGUF models](https://huggingface.co/bartowski/DeepSeek-R1-GGUF) on your Arm-based CPU using `llama.cpp`. [llama.cpp](https://github.com/ggerganov/llama.cpp) is an open source C/C++ project developed by Georgi Gerganov that enables efficient LLM inference on a variety of hardware - both locally, and in the cloud. -## About the DeepSeek-R1 model and GGUF model format +## Understanding the DeepSeek-R1 model and GGUF format The [DeepSeek-R1 model](https://huggingface.co/deepseek-ai/DeepSeek-R1) from DeepSeek-AI available on Hugging Face, is released under the [MIT License](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE) and free to use for research and commercial purposes. @@ -24,9 +28,9 @@ The DeepSeek-R1 model has 671 billion parameters, based on Mixture of Experts(Mo Traditionally, the training and inference of LLMs has been done on GPUs using full-precision 32-bit (FP32) or half-precision 16-bit (FP16) data type formats for the model parameter and weights. 
Recently, a new binary model format called GGUF was introduced by the `llama.cpp` team. This new GGUF model format uses compression and quantization techniques that remove the dependency on using FP32 and FP16 data type formats. For example, GGUF supports quantization where model weights that are generally stored as FP16 data types are scaled down to 4-bit integers. This significantly reduces the need for computational resources and the amount of RAM required. These advancements made in the model format and the data types used make Arm CPUs a great fit for running LLM inferences. -## Install dependencies +## Install build dependencies on your Arm-based server -Install the following packages on your Arm based server instance: +Install the following packages: ```bash sudo apt update @@ -40,7 +44,7 @@ sudo apt install gcc g++ -y sudo apt install build-essential -y ``` -## Download and build llama.cpp +## Clone and build llama.cpp You are now ready to start building `llama.cpp`. @@ -101,7 +105,7 @@ general: ``` -## Install Hugging Face Hub +## Set up Hugging Face and download the model There are a few different ways you can download the DeepSeek-R1 model. In this Learning Path, you download the model from Hugging Face. @@ -137,19 +141,19 @@ huggingface-cli download bartowski/DeepSeek-R1-GGUF --include "*DeepSeek-R1-Q4_0 ``` Before you proceed and run this model, take a quick look at what `Q4_0` in the model name denotes. -## Quantization format +## Understanding the Quantization format `Q4_0` in the model name refers to the quantization method the model uses. The goal of quantization is to reduce the size of the model (to reduce the memory space required) and faster (to reduce memory bandwidth bottlenecks transferring large amounts of data from memory to a processor). The primary trade-off to keep in mind when reducing a model's size is maintaining quality of performance. Ideally, a model is quantized to meet size and speed requirements while not having a negative impact on performance. This model is `DeepSeek-R1-Q4_0-00001-of-00010.gguf`, so what does each component mean in relation to the quantization level? The main thing to note is the number of bits per parameter, which is denoted by 'Q4' in this case or 4-bit integer. As a result, by only using 4 bits per parameter for 671 billion parameters, the model drops to be 354 GB in size. -## Run the pre-quantized DeepSeek-R1 LLM model weights on your Arm-based server +## Run the DeepSeek-R1 Chatbot on your Arm server As of [llama.cpp commit 0f1a39f3](https://github.com/ggerganov/llama.cpp/commit/0f1a39f3), Arm has contributed code for performance optimization with three types of GEMV/GEMM kernels corresponding to three processor types: -* AWS Graviton2, where you only have NEON support (you will see less improvement for these GEMV/GEMM kernels), -* AWS Graviton3, where the GEMV/GEMM kernels exploit both SVE 256 and MATMUL INT8 support, and -* AWS Graviton4, where the GEMV/GEMM kernels exploit NEON/SVE 128 and MATMUL_INT8 support +* AWS Graviton2, where you only have NEON support (you will see less improvement for these GEMV/GEMM kernels). +* AWS Graviton3, where the GEMV/GEMM kernels exploit both SVE 256 and MATMUL INT8 support. +* AWS Graviton4, where the GEMV/GEMM kernels exploit NEON/SVE 128 and MATMUL_INT8 support. With the latest commits in `llama.cpp` you will see improvements for these Arm optimized kernels directly on your Arm-based server. You can run the pre-quantized Q4_0 model as is and do not need to re-quantize the model. 
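Before running it, you can sanity-check the roughly 354 GB Q4_0 size mentioned in the previous section with a quick back-of-the-envelope calculation. This is only a sketch: it assumes the common Q4_0 block layout of 32 weights stored as packed 4-bit values (16 bytes) plus one FP16 scale (2 bytes) per block, and it ignores the tensors that stay at higher precision in the real file.

```bash
# Rough size estimate for a 671B-parameter model quantized to Q4_0
awk 'BEGIN {
  params          = 671e9   # parameter count
  bytes_per_block = 18      # 16 bytes of packed 4-bit weights + 2-byte FP16 scale
  weights_per_blk = 32
  printf "Estimated Q4_0 size: %.0f GiB\n", params * bytes_per_block / weights_per_blk / (1024^3)
}'
```

The estimate lands close to the figure quoted earlier; the exact size depends on which tensors are kept at higher precision and on whether the total is reported in GB or GiB.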
@@ -161,7 +165,9 @@ Run the pre-quantized DeepSeek-R1 model exactly as the weights were downloaded f

This command will use the downloaded model (`-m` flag), disable conversation mode explicitly (`-no-cnv` flag), adjust the randomness of the generated text (`--temp` flag), with the specified prompt (`-p` flag), and target a 512 token completion (`-n` flag), using 64 threads (`-t` flag).

-You may notice there are many gguf files downloaded, llama.cpp can load all series of files by passing the first one with `-m` flag.
+You might notice that many GGUF files were downloaded. `llama.cpp` can load the whole series of split files when you pass the first one with the `-m` flag.
+
+## Analyze the output and performance statistics

You will see lots of interesting statistics being printed from llama.cpp about the model and the system, followed by the prompt and completion. The tail of the output from running this model on an AWS Graviton4 r8g.24xlarge instance is shown below:

@@ -374,10 +380,10 @@ llama_perf_context_print: total time = 42340.53 ms / 531 tokens

The `system_info` printed from llama.cpp highlights important architectural features present on your hardware that improve the performance of the model execution. In the output shown above from running on an AWS Graviton4 instance, you will see:

- * NEON = 1 This flag indicates support for Arm's Neon technology which is an implementation of the Advanced SIMD instructions
- * ARM_FMA = 1 This flag indicates support for Arm Floating-point Multiply and Accumulate instructions
- * MATMUL_INT8 = 1 This flag indicates support for Arm int8 matrix multiplication instructions
- * SVE = 1 This flag indicates support for the Arm Scalable Vector Extension
+ * NEON = 1 This flag indicates support for Arm's Neon technology which is an implementation of the Advanced SIMD instructions.
+ * ARM_FMA = 1 This flag indicates support for Arm Floating-point Multiply and Accumulate instructions.
+ * MATMUL_INT8 = 1 This flag indicates support for Arm int8 matrix multiplication instructions.
+ * SVE = 1 This flag indicates support for the Arm Scalable Vector Extension.

The end of the output shows several model timings:

@@ -386,5 +392,7 @@ The end of the output shows several model timings:
* prompt eval time refers to the time taken to process the prompt before generating the new text.
* eval time refers to the time taken to generate the output. Generally anything above 10 tokens per second is faster than what humans can read.

+## What's next?
+
You have successfully run a LLM chatbot with Arm KleidiAI optimizations, all running on your Arm AArch64 CPU on your server. You can continue experimenting and trying out the model with different prompts.

diff --git a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-server.md b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-server.md
index dfc1ca43b5..c1302ace6e 100644
--- a/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-server.md
+++ b/content/learning-paths/servers-and-cloud-computing/deepseek-cpu/deepseek-server.md
@@ -6,6 +6,8 @@ weight: 4
layout: learningpathall
---

+## Start the LLM server with llama.cpp
+
You can use the `llama.cpp` server program and submit requests using an OpenAI-compatible API. This enables applications to be created which access the LLM multiple times without starting and stopping it. You can also access the server over the network to another machine hosting the LLM.
@@ -45,7 +47,7 @@ curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/jso }' 2>/dev/null | jq -C ``` -The `model` value in the API is not used, you can enter any value. This is because there is only one model loaded in the server. +The `model` value is ignored by the server, so you can use any placeholder string. This is because there is only one model loaded in the server. Run the script: @@ -90,9 +92,11 @@ The `curl` command accesses the LLM and you see the output: } ``` +## Inspect the JSON output + In the returned JSON data you see the LLM output, including the content created from the prompt. -## Use Python +## Access the API using Python You can also use a Python program to access the OpenAI-compatible API. @@ -121,7 +125,7 @@ client = OpenAI( completion = client.chat.completions.create( model="not-used", messages=[ - {"role": "system", "content": "You are a coding assistant, skilled in programming.."}, + {"role": "system", "content": "You are a coding assistant, skilled in programming..."}, {"role": "user", "content": "Write a hello world program in C++."} ], stream=True, @@ -137,6 +141,9 @@ Run the Python file (make sure the server is still running): python ./python-test.py ``` +## Example Output + + You see the output generated by the LLM: ```output @@ -192,4 +199,6 @@ When compiled and run, this program will display: Hello World! ``` +## What's next? + You can continue to experiment with different large language models and write scripts to try them. diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/EBS.png b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/EBS.png new file mode 100644 index 0000000000..13f8422e2e Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/EBS.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/IOPS.png b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/IOPS.png new file mode 100644 index 0000000000..37d9d49bde Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/IOPS.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_index.md b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_index.md new file mode 100644 index 0000000000..90cb9d9675 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_index.md @@ -0,0 +1,45 @@ +--- +title: Microbenchmark Storage Performance with fio + +draft: true +cascade: + draft: true + +minutes_to_complete: 30 + +who_is_this_for: This is an introductory topic for developers seeking to optimize storage costs and performance, identify bottlenecks, and navigate storage considerations during application migration across platforms. + +learning_objectives: + - Understand the flow of data for storage devices. + - Use basic observability utilities such as iostat, iotop and pidstat. + - Understand how to run fio for microbenchmarking a block storage device. + +prerequisites: + - An [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider or an Arm Linux server. + - Familiarity with Linux. 
+ +author: Kieran Hejmadi + +### Tags +skilllevels: Introductory +subjects: Performance and Architecture +armips: + - Neoverse +tools_software_languages: + - bash + - Runbook +operatingsystems: + - Linux + +further_reading: + - resource: + title: Fio documentation + link: https://fio.readthedocs.io/en/latest/fio_doc.html#running-fio + type: documentation + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/characterising-workload.md b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/characterising-workload.md new file mode 100644 index 0000000000..af58524576 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/characterising-workload.md @@ -0,0 +1,134 @@ +--- +title: Characterizing a workload +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Basic Characteristics + +The basic attributes of a given workload are the following. + +- IOPS +- I/O Size +- Throughput +- Read to Write Ratio +- Random vs Sequential access + +There are many more characteristics to observe, such as latency, but since this is an introductory topic you will mostly stick to the high-level metrics listed above. + +## Run an Example Workload + +Connect to an Arm-based server or cloud instance. + +As an example workload, you can use the media manipulation tool, FFMPEG, on an AWS `t4g.medium` instance. The `t4g.medium` is an Arm-based (AWS Graviton2) virtual machine with 2 vCPUs, 4 GiB of memory, and is designed for general-purpose workloads with a balance of compute, memory, and network resources. + +First, install the required tools. + +```bash +sudo apt update +sudo apt install ffmpeg iotop -y +``` + +Download the popular reference video for transcoding, `BigBuckBunny.mp4`, which is available under the [Creative Commons 3.0 License](https://creativecommons.org/licenses/by/3.0/). + +```bash +cd ~ +mkdir src && cd src +wget http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4 +``` + +Run the following command to begin transcoding the video and audio using the `H.264` and `aac` transcoders respectively. The `-flush_packets` flag forces FFMPEG to write each chunk of video data from memory to storage immediately, rather than buffering it in memory. 
This reduces the risk of data loss in case of a crash and allows you to observe more frequent disk writes during the transcoding process. + +```bash +ffmpeg -i BigBuckBunny.mp4 -c:v libx264 -preset fast -crf 23 -c:a aac -b:a 128k -flush_packets 1 output_video.mp4 +``` + +### Observe Disk Usage + +While the transcoding is running, you can use the `pidstat` command to see the disk statistics of that specific process. + +```bash +pidstat -d -p $(pgrep ffmpeg) 1 +``` + +Since this example video (151 MB) fits within memory, you observe no `kB_rd/s` for the storage device after the initial read. However, because you are flushing to storage, you observe periodic writes of approximately 275 `kB_wr/s`. + +```output +Linux 6.8.0-1024-aws (ip-10-248-213-118) 04/15/25 _aarch64_ (2 CPU) + +10:01:24 UID PID kB_rd/s kB_wr/s kB_ccwr/s iodelay Command +10:01:25 1000 24250 0.00 276.00 0.00 0 ffmpeg +10:01:26 1000 24250 0.00 256.00 0.00 0 ffmpeg +10:01:27 1000 24250 0.00 216.00 0.00 0 ffmpeg +10:01:28 1000 24250 0.00 184.00 0.00 0 ffmpeg +10:01:29 1000 24250 0.00 424.00 0.00 0 ffmpeg +10:01:30 1000 24250 0.00 312.00 0.00 0 ffmpeg +10:01:31 1000 24250 0.00 372.00 0.00 0 ffmpeg +10:01:32 1000 24250 0.00 344.00 0.00 0 ffmpeg +``` + +{{% notice Note%}} +In this simple example, since you are interacting with a file on the mounted filesystem, you are also observing the behavior of the filesystem. +{{% /notice %}} + +There may be other processes or background services that are writing to this disk. You can use the `iotop` command for inspection. As shown in the output below, the `ffmpeg` process has the highest disk utilization. + +```bash +sudo iotop +``` + +```output +Total DISK READ: 0.00 B/s | Total DISK WRITE: 332.11 K/s +Current DISK READ: 0.00 B/s | Current DISK WRITE: 0.00 B/s + TID PRIO USER DISK READ DISK WRITE> COMMAND + 24891 be/4 ubuntu 0.00 B/s 332.11 K/s ffmpeg -i BigBuckBunny.mp4 -c:v ~ts 1 output_video.mp4 [mux0:mp4] + 1 be/4 root 0.00 B/s 0.00 B/s systemd --system --deserialize=74 + 2 be/4 root 0.00 B/s 0.00 B/s [kthreadd] +``` + +Using the input/output statistics command (`iostat`), you can observe the system-wide metrics from the `nvme0n1` drive. Please note that you are using a snapshot of this workload; more accurate characteristics can be obtained by measuring the distribution of a workload. + +```bash +watch -n 0.1 iostat -z nvme0n1 +``` +You see output similar to that below. + +```output +Device tps kB_read/s kB_wrtn/s kB_dscd/s kB_read kB_wrtn kB_dscd +nvme0n1 3.81 31.63 217.08 0.00 831846 5709210 0 +``` + +To observe more detailed metrics, you can run `iostat` with the `-x` option. + +```bash +iostat -xz nvme0n1 +``` + +The output is similar to: + +```output +Device r/s rkB/s rrqm/s %rrqm r_await rareq-sz w/s wkB/s wrqm/s %wrqm w_await wareq-sz d/s dkB/s drqm/s %drqm d_await dareq-sz f/s f_await aqu-sz %util +nvme0n1 0.66 29.64 0.24 26.27 0.73 44.80 2.92 203.88 3.17 52.01 2.16 69.70 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01 0.15 +``` + +### Basic Characteristics of the Example Workload + +This is a simple transcoding workload with flushed writes, where most data is processed and stored in memory. Disk I/O is minimal, with an IOPS of just 3.81, low throughput (248.71 kB/s), and an average IO depth of 0.01 — all summarized in very low disk utilization. The 52% write merge rate and low latencies further suggest sequential, infrequent disk access, reinforcing that the workload is primarily memory-bound. 
+ +| Metric | Calculation Explanation | Value | +|--------------------|-------------------------------------------------------------------------------------------------------------|---------------| +| IOPS | Taken directly from the `tps` (transfers per second) field | 3.81 | +| Throughput (Read) | From monitoring tool output | 31.63 kB/s | +| Throughput (Write) | From monitoring tool output | 217.08 kB/s | +| Throughput (Total) | Sum of read and write throughput | 248.71 kB/s | +| Avg I/O Size | Total throughput divided by IOPS: 248.71 / 3.81 | ≈ 65.3 KB | +| Read Ratio | Read throughput ÷ total throughput: 31.63 / 248.71 | ~13% | +| Write Ratio | Write throughput ÷ total throughput: 217.08 / 248.71 | ~87% | +| IO Depth | Taken directly from `aqu-sz` (average number of in-flight I/Os) | 0.01 | +| Access Pattern | 52% of writes were merged (`wrqm/s` = 3.17, `w/s` = 2.92), indicating mostly sequential disk access with low wait times and frequent cache hits | Sequential (52.01% merged) | + +{{% notice Note %}} +If you have access to the workload's source code, you can more easily observe the expected access patterns. +{{% /notice %}} diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/diskio.jpeg b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/diskio.jpeg new file mode 100644 index 0000000000..cd21b64654 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/diskio.jpeg differ diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/introduction.md b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/introduction.md new file mode 100644 index 0000000000..fac623a0d1 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/introduction.md @@ -0,0 +1,47 @@ +--- +title: Fundamentals of storage systems +weight: 2 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Introduction + +Ideally, your system's storage activity should be zero—meaning all application data and instructions are available in memory or cache, with no reads or writes to hard disk drives (HDDs) or solid-state drives (SSDs) required. However, due to physical capacity limits, data volatility, and the need to store large amounts of data, most applications frequently access storage media. + +## High-Level Flow of Data + +The diagram below provides a high-level overview of how data is written to or read from a storage device. It illustrates a multi-disk I/O architecture, where each disk (Disk 1 to Disk N) has its own I/O queue and optional disk cache, communicating with a central CPU via a disk controller. Memory, not explicitly shown, sits between the CPU and storage, offering fast but volatile access. File systems, also not depicted, operate at the OS/kernel level to handle file access metadata and provide a user-friendly interface through files and directories. + +![disk i/o](./diskio.jpeg) + +## Key Terms + +#### Sectors and Blocks + +Sectors are the basic physical units on a storage device. Traditional hard drives typically use a sector size of 512 bytes, while many modern disks use 4096 bytes (4K sectors) for improved error correction and efficiency. + +Blocks are logical groupings of one or more sectors used by filesystems for data organization. A common filesystem block size is 4096 bytes, meaning each block might consist of eight 512-byte sectors, or map directly to a 4096-byte physical sector if supported by the disk. 
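+If you want to check these values on your own system, you can query what the kernel reports. This is a quick illustrative sketch: the device name `/dev/nvme0n1` is only an example and will differ on your machine.
+
+```bash
+# Logical and physical sector sizes for every block device
+lsblk -o NAME,LOG-SEC,PHY-SEC
+
+# Logical sector, physical sector, and block sizes for one device (adjust the device name)
+sudo blockdev --getss --getpbsz --getbsz /dev/nvme0n1
+```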
+
+#### Input/Output Operations per Second (IOPS)
+
+IOPS measures how many random read or write requests your storage system can handle per second. IOPS can vary by block size and storage medium (e.g., flash drives). Traditional HDDs often do not specify IOPS; for example, AWS does not show IOPS values for HDD volumes.
+
+![iops_hdd](./IOPS.png)
+
+#### Throughput and Bandwidth
+
+Throughput is the data transfer rate, usually measured in MB/s. Bandwidth specifies the maximum amount of data a connection can transfer. You can calculate storage throughput as IOPS × block size.
+
+#### Queue Depth
+
+Queue depth is the number of simultaneous I/O operations that can be pending on a device. Consumer SSDs typically have a queue depth of 32–64, while enterprise-class NVMe drives can support hundreds or thousands of concurrent requests per queue. Higher queue depth allows more parallelism and can improve I/O performance.
+
+#### I/O Engine
+
+The I/O engine is the software component in Linux that manages I/O requests between applications and the storage subsystem. For example, the Linux kernel’s block I/O scheduler queues and dispatches requests to device drivers, using multiple queues to optimize disk access. In benchmarking tools like fio, you can select I/O engines such as sync (synchronous I/O), `libaio` (Linux native asynchronous I/O), or `io_uring` (which uses newer Linux kernel features for asynchronous I/O).
+
+#### I/O Wait
+
+I/O wait is the time a CPU core spends waiting for I/O operations to complete.
diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/using-fio.md b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/using-fio.md
new file mode 100644
index 0000000000..6e7e0a40ed
--- /dev/null
+++ b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/using-fio.md
@@ -0,0 +1,191 @@
+---
+title: Using fio
+weight: 4
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Install fio
+
+You can use the same `t4g.medium` instance from the previous section with two different types of SSD-based block storage devices, as shown in the console screenshot below.
+
+To add the required EBS volumes to your EC2 instance:
+
+1. In the AWS Console, navigate to EC2 > Volumes > Create Volume
+2. Create a volume with the following settings:
+   - Volume Type: io2 (Provisioned IOPS SSD)
+   - Size: 8 GiB
+   - IOPS: 400
+   - Availability Zone: Same as your EC2 instance
+3. Create another volume with the following settings:
+   - Volume Type: gp2 (General Purpose SSD)
+   - Size: 8 GiB
+   - Availability Zone: Same as your EC2 instance
+4. Once created, select each volume and choose Actions > Attach Volume
+5. Select your t4g.medium instance from the dropdown and attach each volume
+
+Both block devices have the same 8 GiB capacity, but the `io2` volume is geared towards throughput, as opposed to the general purpose SSD `gp2`.
+
+![EBS](./EBS.png)
+
+In this section, you will observe the real-world performance of each volume for your workload so that the results can inform your selection.
+
+Flexible I/O (fio) is a command-line tool that generates a synthetic workload with specific I/O characteristics. This serves as a simpler alternative to full record and replay testing. Fio is available through most Linux distribution package repositories; refer to the [documentation](https://github.com/axboe/fio) for binary package availability.
+
+```bash
+sudo apt update
+sudo apt install fio -y
+```
+
+Confirm the installation with the following command.
+
+```bash
+fio --version
+```
+
+The version is printed:
+
+```output
+fio-3.37
+```
+
+## Locate Device
+
+Fio allows you to microbenchmark either the block device or a mounted filesystem. Use the disk free (`df`) command to confirm that your EBS volumes are not mounted. Writing to drives that hold critical information can cause data loss, so you will write to a blank, unmounted block storage device instead.
+
+Use the `lsblk` command to view the EBS volumes attached to the server (`nvme1n1` and `nvme2n1`). The number immediately appended to `nvme`, for example `nvme0`, identifies a physically separate device. `nvme1n1` corresponds to the faster `io2` block device and `nvme2n1` corresponds to the slower `gp2` block device.
+
+```bash
+lsblk -e 7
+```
+
+```output
+NAME         MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
+nvme1n1      259:0    0    8G  0 disk
+nvme0n1      259:1    0    8G  0 disk
+├─nvme0n1p1  259:3    0    7G  0 part /
+├─nvme0n1p15 259:4    0   99M  0 part /boot/efi
+└─nvme0n1p16 259:5    0  923M  0 part /boot
+nvme2n1      259:2    0    8G  0 disk
+```
+
+{{% notice Please Note %}}
+If you have more than one block volume attached to an instance, the `sudo nvme list` command from the `nvme-cli` package can be used to differentiate between volumes.
+{{% /notice %}}
+
+## Generating a Synthetic Workload
+
+Suppose you want to simulate a fictional logging application with the following characteristics, observed using the tools from the previous section.
+
+{{% notice Workload %}}
+The logging workload has light sequential read and write characteristics. The system write throughput per thread is 5 MB/s with 83% writes. There are infrequent bursts of reads for approximately 5 seconds, operating at up to 16 MB/s per thread. The workload can scale the infrequent reads and writes to use up to 16 threads each. The block sizes for writes and reads are 64 KiB and 256 KiB respectively (as opposed to the standard 4 KiB page size).
+
+Further, the application is latency sensitive and, given that it holds critical information, needs to write directly to non-volatile storage through direct IO.
+{{% /notice %}}
+
+The fio tool uses simple configuration `jobfiles` to describe the characteristics of your synthetic workload. Parameters under the `[global]` option are shared among jobs. From the example below, you can create two jobs to represent the steady writes and the infrequent reads. Please refer to the official [documentation](https://fio.readthedocs.io/en/latest/fio_doc.html#job-file-format) for more details.
+
+Copy and paste the configuration below into two job files, `nvme1.fio` and `nvme2.fio`, one for each block device you are comparing, and adjust the `filename` parameter in each file accordingly.
+
+```ini
+ ; -- start job file including.fio --
+[global]
+ioengine=libaio
+direct=1 ; write directly to the drive
+time_based
+runtime=30
+group_reporting=1
+log_avg_msec=1000
+rate=16m,5m ; limit to 16 MB/s and 5MB/s for read and write per job
+numjobs=${NUM_JOBS} ; set at the command line
+iodepth=${IO_DEPTH} ; set at the command line
+filename=/dev/nvme1n1 ; or nvme2n1
+
+[steady_write]
+name=steady_write
+rw=write ; sequential write
+bs=64k ; Block size of 64KiB (default block size of 4 KiB)
+
+[burst_read]
+name=burst_read
+rw=read
+bs=256k ; Block size of 256KiB for reads (default is 4KiB)
+startdelay=10 ; simulate infrequent reads (5 seconds out of 30)
+runtime=5
+; -- end job file including.fio --
+```
+
+
+{{% notice Note %}}
+Running fio directly on block devices requires root privileges (hence the use of `sudo`). Be careful: writing to the wrong device can result in data loss.
Always ensure you are targeting a blank, unmounted device. +{{% /notice %}} + +Run the following commands to run each test back to back. + +```bash +sudo NUM_JOBS=16 IO_DEPTH=64 fio nvme1.fio +``` + +Then run again with the following command: + +```bash +sudo NUM_JOBS=16 IO_DEPTH=64 fio nvme2.fio +``` + +### Interpreting Results + +The final terminal output from both runs are shown below. + +```output +nvme1: + +Run status group 0 (all jobs): + READ: bw=118MiB/s (124MB/s), 118MiB/s-118MiB/s (124MB/s-124MB/s), io=629MiB (660MB), run=5324-5324msec + WRITE: bw=80.0MiB/s (83.9MB/s), 80.0MiB/s-80.0MiB/s (83.9MB/s-83.9MB/s), io=2400MiB (2517MB), run=30006-30006msec + +Disk stats (read/write): + nvme1n1: ios=2663/38225, sectors=1294480/4892800, merge=0/0, ticks=148524/454840, in_queue=603364, util=62.19% + +nvme2: + +Run status group 0 (all jobs): + READ: bw=85.6MiB/s (89.8MB/s), 85.6MiB/s-85.6MiB/s (89.8MB/s-89.8MB/s), io=456MiB (478MB), run=5322-5322msec + WRITE: bw=60.3MiB/s (63.2MB/s), 60.3MiB/s-60.3MiB/s (63.2MB/s-63.2MB/s), io=1816MiB (1904MB), run=30119-30119msec + +Disk stats (read/write): + nvme2n1: ios=1872/28855, sectors=935472/3693440, merge=0/0, ticks=159753/1025104, in_queue=1184857, util=89.83% +``` + +Here you can see that the faster `io2` block storage (`nvme1`) is able to meet the throughput requirement of 80MB/s for steady writes when all 16 write threads are running (5MB/s per thread). However, `gp2` saturates at 60.3 MiB/s with over 89.8% SSD utilization. + +Suppose your fictional logging application is sensitive to operation latency. The output below highlights that over ~35% of operations have a latency above 1s on nvme2 compared to ~7% on nvme1. High latency percentiles can significantly impact application responsiveness, especially for latency-sensitive workloads like logging. + +```output + + nvme2: + + lat (usec) : 10=0.01%, 500=1.53%, 750=5.13%, 1000=7.55% + lat (msec) : 2=29.49%, 4=0.89%, 10=0.09%, 20=0.02%, 50=0.21% + lat (msec) : 100=0.56%, 250=1.84%, 500=6.39%, 750=9.76%, 1000=10.17% + lat (msec) : 2000=19.59%, >=2000=6.77% + + nvme1: + + lat (usec) : 750=0.44%, 1000=0.41% + lat (msec) : 2=62.63%, 4=1.12%, 10=0.34%, 20=1.61%, 50=3.91% + lat (msec) : 100=2.34%, 250=5.91%, 500=8.46%, 750=4.33%, 1000=2.50% + lat (msec) : 2000=3.62%, >=2000=2.38% +``` + +These insights suggest the SSD designed for throughput, `io2`, is more suitable than the general purpose `gp2` storage to meet the requirements of your logging application. + +{{% notice Tip%}} +If the text output is hard to follow, you can use the `fio2gnuplot` package to plot the data graphically or use the visualizations available from your cloud service provider's dashboard. See image below for an example. + + ![plot](./visualisations.png) +{{% /notice %}} + +The insights gathered by microbenchmarking with fio above can lead to more informed decisions about which block storage to connect to your Arm-based instance. 
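+If you prefer to post-process results rather than read the text summary, fio can also emit machine-readable output. The sketch below is one possible approach: it assumes the `jq` package is acceptable on your instance and that your fio version places per-job results under a top-level `jobs` array in its JSON output, so inspect the generated file if the field names differ.
+
+```bash
+# Re-run a job with JSON output and extract the aggregate read/write bandwidth (KiB/s)
+sudo apt install jq -y
+sudo NUM_JOBS=16 IO_DEPTH=64 fio --output-format=json --output=nvme1.json nvme1.fio
+jq '.jobs[] | {job: .jobname, read_bw_KiB: .read.bw, write_bw_KiB: .write.bw}' nvme1.json
+```
+
+This makes it easier to compare several volumes or instance types in a script or spreadsheet.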
+ + diff --git a/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/visualisations.png b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/visualisations.png new file mode 100644 index 0000000000..20f83e8d72 Binary files /dev/null and b/content/learning-paths/servers-and-cloud-computing/disk-io-benchmark/visualisations.png differ diff --git a/content/learning-paths/servers-and-cloud-computing/funASR/1_asr.md b/content/learning-paths/servers-and-cloud-computing/funasr/1_asr.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/funASR/1_asr.md rename to content/learning-paths/servers-and-cloud-computing/funasr/1_asr.md diff --git a/content/learning-paths/servers-and-cloud-computing/funASR/2_modelscope.md b/content/learning-paths/servers-and-cloud-computing/funasr/2_modelscope.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/funASR/2_modelscope.md rename to content/learning-paths/servers-and-cloud-computing/funasr/2_modelscope.md diff --git a/content/learning-paths/servers-and-cloud-computing/funASR/3_funasr.md b/content/learning-paths/servers-and-cloud-computing/funasr/3_funasr.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/funASR/3_funasr.md rename to content/learning-paths/servers-and-cloud-computing/funasr/3_funasr.md diff --git a/content/learning-paths/servers-and-cloud-computing/funASR/_index.md b/content/learning-paths/servers-and-cloud-computing/funasr/_index.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/funASR/_index.md rename to content/learning-paths/servers-and-cloud-computing/funasr/_index.md diff --git a/content/learning-paths/servers-and-cloud-computing/funasr/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/funasr/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/funasr/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/geekbench/_index.md b/content/learning-paths/servers-and-cloud-computing/geekbench/_index.md index bfb1d90534..4a2efae52b 100644 --- a/content/learning-paths/servers-and-cloud-computing/geekbench/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/geekbench/_index.md @@ -4,9 +4,9 @@ title: Get started with Geekbench on Arm minutes_to_complete: 15 who_is_this_for: This is an introductory topic for software developers interested in comparing the performance of Arm Linux computers using Geekbench. 
- + learning_objectives: -- Learn how to install and run Geekbench +- Learn how to install and run Geekbench - Use Geekbench to help determine the appropriate hardware configuration for your workload prerequisites: @@ -25,8 +25,6 @@ operatingsystems: test_maintenance: true test_images: - ubuntu:latest -test_status: - - passed tools_software_languages: - Geekbench - Runbook @@ -46,7 +44,8 @@ further_reading: link: https://www.xda-developers.com/geekbench/ type: website - +### FIXED, DO NOT MODIFY +# ================================================================================ weight: 1 layout: learningpathall learning_path_main_page: 'yes' diff --git a/content/learning-paths/servers-and-cloud-computing/geekbench/geekbench.md b/content/learning-paths/servers-and-cloud-computing/geekbench/geekbench.md index d8d9a39759..2470664c94 100644 --- a/content/learning-paths/servers-and-cloud-computing/geekbench/geekbench.md +++ b/content/learning-paths/servers-and-cloud-computing/geekbench/geekbench.md @@ -4,11 +4,11 @@ title: Download and run Geekbench weight: 2 --- -When selecting Arm-based hardware, you may need a way to compare different systems and select a hardware configuration for your workload. Running benchmarks on different systems with different configurations is a good way to get more information about system performance. +When selecting Arm-based hardware, you may need a way to compare different systems and select a hardware configuration for your workload. Running benchmarks on different systems with different configurations is a good way to get more information about system performance. [Geekbench](https://www.geekbench.com/index.html) is a cross-platform benchmark that makes it easy to measure system performance. [Preview Versions](https://www.geekbench.com/preview/) are available for Linux on Arm. You can also check the Geekbench [downloads](https://www.geekbench.com/download/) area for additional operating system options. -Geekbench provides a single-core score and a multi-core score, as well as individual performance scores for specific tests. You can use the scores to compare different systems and different configurations. A higher score is better. +Geekbench provides a single-core score and a multi-core score, as well as individual performance scores for specific tests. You can use the scores to compare different systems and different configurations. A higher score is better. Additional features are available with a purchased [license](https://www.primatelabs.com/store/). @@ -16,7 +16,7 @@ Additional features are available with a purchased [license](https://www.primate You will need a local Arm platform or an [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider. Geekbench 5 requires `Ubuntu 16.04 LTS` or later and Geekbench 6 requires `Ubuntu 18.04 LTS` or later. -## Download +## Download Both Geekbench 5 and Geekbench 6 binaries are available to download. The instructions below are for the Geekbench 6 preview version. @@ -26,21 +26,21 @@ Both Geekbench 5 and Geekbench 6 binaries are available to download. The instruc sudo apt install -y wget ``` -2. Download, extract the archive, and navigate to the directory: +2. Download, extract the archive: ```bash wget https://cdn.geekbench.com/Geekbench-6.2.2-LinuxARMPreview.tar.gz tar -xf Geekbench-6.2.2-LinuxARMPreview.tar.gz -cd Geekbench-6.2.2-LinuxARMPreview ``` -## Run +## Run -There is no need for a browser or Linux desktop to run the benchmark. 
The Linux command line is all you need. +There is no need for a browser or Linux desktop to run the benchmark. The Linux command line is all you need. -Run the `geekbench6` benchmark. +Navigate to the extracted directory and run the `geekbench6` benchmark. ```bash +cd Geekbench-6.2.2-LinuxARMPreview ./geekbench6 ``` @@ -59,7 +59,7 @@ Upload succeeded. Visit the following link and view your results online: You can create an account on [Geekbench browser](https://browser.geekbench.com) and save the results from your runs. This makes it easy to run Geekbench on a variety of systems and see your results together and compare them. You can also add notes to the results to help remember information about each run. -The `` and `` will be unique numbers for your run. +The `` and `` will be unique numbers for your run. ```output Visit the following link and add this result to your profile: diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/example_application.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Example_application.md rename to content/learning-paths/servers-and-cloud-computing/java-gc-tuning/example_application.md diff --git a/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md b/content/learning-paths/servers-and-cloud-computing/java-gc-tuning/tuning-parameters.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/java-gc-tuning/Tuning Parameters.md rename to content/learning-paths/servers-and-cloud-computing/java-gc-tuning/tuning-parameters.md diff --git a/content/learning-paths/servers-and-cloud-computing/memcached/_index.md b/content/learning-paths/servers-and-cloud-computing/memcached/_index.md index 019ff66b14..21e29d41fd 100644 --- a/content/learning-paths/servers-and-cloud-computing/memcached/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/memcached/_index.md @@ -1,29 +1,35 @@ --- -armips: -- Neoverse -author: Pareena Verma -layout: learningpathall +title: Run memcached on Arm servers and measure its performance + +minutes_to_complete: 10 + +who_is_this_for: This is an introductory topic for developers who want to use memcached as their in-memory key-value store. + + learning_objectives: - Install and run memcached on your Arm-based cloud server - Use an open-source benchmark to test memcached performance -learning_path_main_page: 'yes' -minutes_to_complete: 10 -operatingsystems: -- Linux + prerequisites: - An Arm based instance from an appropriate cloud service provider. -skilllevels: Introductory -subjects: Web + +author: Pareena Verma + test_images: - ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed -title: Run memcached on Arm servers and measure its performance + +### Tags +skilllevels: Introductory +subjects: Web +armips: +- Neoverse +operatingsystems: +- Linux tools_software_languages: -- Memcached - Runbook +- Memcached further_reading: - resource: @@ -36,8 +42,10 @@ further_reading: type: blog - -weight: 1 -who_is_this_for: This is an introductory topic for developers who want to use memcached as their in-memory key-value store. 
- +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md b/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md index 3d7e04e820..8911ab48ed 100644 --- a/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/memcached_cache/_index.md @@ -1,45 +1,55 @@ --- -armips: -- Neoverse -author: Pareena Verma +title: Deploy Memcached as a cache for MySQL and PostgreSQL on Arm based servers + + description: Deploy Memcached as a cache for MySQL and PostgreSQL on Arm servers -layout: learningpathall + +minutes_to_complete: 60 + +who_is_this_for: This is an advanced topic for developers who want to use memcached as their in-memory key-value store. + learning_objectives: - Deploy memcached as a cache for MySQL on AWS, Azure and GCP Arm based Instance - Deploy memcached as a cache for PostgreSQL on AWS, Azure and GCP Arm based Instance -learning_path_main_page: 'yes' -minutes_to_complete: 60 -operatingsystems: -- Linux + prerequisites: - An Amazon Web Services (AWS) [account](https://aws.amazon.com/) - An Azure portal [account](https://azure.microsoft.com/en-in/get-started/azure-portal) - A Google Cloud [account](https://console.cloud.google.com/) - A machine with [Terraform](/install-guides/terraform/), [AWS CLI](/install-guides/aws-cli), [Google Cloud CLI](/install-guides/gcloud), [Azure CLI](/install-guides/azure-cli), [AWS IAM authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html), and [Ansible](/install-guides/ansible/) installed -skilllevels: Advanced -subjects: Web + +author: Pareena Verma + + test_images: - ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed -title: Deploy Memcached as a cache for MySQL and PostgreSQL on Arm based servers + +### Tags +skilllevels: Advanced +subjects: Web +armips: +- Neoverse tools_software_languages: - Memcached - SQL - MySQL - PostgreSQL +operatingsystems: +- Linux further_reading: - resource: title: Memcached Wiki link: https://github.com/memcached/memcached/wiki - type: documentation + type: documentation - -weight: 1 -who_is_this_for: This is an advanced topic for developers who want to use memcached as their in-memory key-value store. - +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
+layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md b/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md index fef4ecf7e0..d10a5d34f5 100644 --- a/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/ml-perf/_index.md @@ -1,31 +1,39 @@ --- -armips: -- Neoverse -author: Pareena Verma -layout: learningpathall +title: Measure Machine Learning Inference Performance on Arm servers + +minutes_to_complete: 20 + +who_is_this_for: This is an introductory topic for software developers interested + in benchmarking machine learning workloads on Arm servers. + + learning_objectives: - Install and run TensorFlow on your Arm-based cloud server. - Use MLPerf Inference benchmark suite, an open-sourced benchmark from MLCommons to test ML performance on your Arm server. -learning_path_main_page: 'yes' -minutes_to_complete: 20 -operatingsystems: -- Linux + prerequisites: - An [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from an appropriate cloud service provider or an on-premise Arm server. -skilllevels: Introductory -subjects: ML + +author: Pareena Verma + test_images: - ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed -title: Measure Machine Learning Inference Performance on Arm servers + +### Tags +skilllevels: Introductory +subjects: ML +armips: +- Neoverse +operatingsystems: +- Linux tools_software_languages: - TensorFlow - Runbook + further_reading: - resource: title: MLPerf Inference Suite Source repo @@ -41,8 +49,10 @@ further_reading: type: blog - -weight: 1 -who_is_this_for: This is an introductory topic for software developers interested - in benchmarking machine learning workloads on Arm servers. +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +layout: learningpathall --- diff --git a/content/learning-paths/servers-and-cloud-computing/ml-perf/ml-perf.md b/content/learning-paths/servers-and-cloud-computing/ml-perf/ml-perf.md index 2b9b7064c7..fe843137c7 100644 --- a/content/learning-paths/servers-and-cloud-computing/ml-perf/ml-perf.md +++ b/content/learning-paths/servers-and-cloud-computing/ml-perf/ml-perf.md @@ -5,7 +5,7 @@ title: "Measure ML Inference Performance on Arm servers" weight: 2 layout: "learningpathall" - + --- @@ -34,11 +34,12 @@ sudo pip install pybind11 ``` ## Clone the MLPerf Inference Benchmarks Repo for Image Classification and Object Detection -You will use the MLPerf Inference benchmark suite from MLCommons to benchmark models for a widely-used ML use-case such as Image Classification and Object Detection. +You will use the MLPerf Inference benchmark suite from MLCommons to benchmark models for a widely-used ML use-case such as Image Classification and Object Detection. 
Start by cloning the repository below: ```bash +cd $HOME git clone --recurse-submodules https://github.com/mlcommons/inference.git mlperf_inference ``` @@ -47,7 +48,7 @@ git clone --recurse-submodules https://github.com/mlcommons/inference.git mlperf Next, build and install the MLPerf Inference Benchmark for the Image Classification and Object Detection use case using the steps below: ```bash -cd mlperf_inference/loadgen/ +cd $HOME/mlperf_inference/loadgen/ CFLAGS="-std=c++14" sudo python3 setup.py develop --user cd ../vision/classification_and_detection/ sudo python3 setup.py develop @@ -72,21 +73,21 @@ export ONEDNN_DEFAULT_FPMATH_MODE=BF16 ``` AWS Graviton3 instances are the first instances with BF16 support. -## Download the ML Model +## Download the ML Model Next, download the ML model you want to run the benchmark with. In this example, download the `resnet50-v1.5` model. -```bash { cwd="~/mlperf_inference/vision/classification_and_detection/" } +```bash { cwd="$HOME/mlperf_inference/vision/classification_and_detection/" } wget -q https://zenodo.org/record/2535873/files/resnet50_v1.pb ``` -## Download the dataset +## Download the dataset You need to download a dataset for the ML model you want to benchmark. The imagenet2012 validation dataset is best used with this ML model. You can [download the dataset](http://image-net.org/challenges/LSVRC/2012/) after you register. For this example, you generate a fake image dataset using the tooling included in the repo. Use the command below: -```bash { cwd="~/mlperf_inference/vision/classification_and_detection/" } +```bash { cwd="$HOME/mlperf_inference/vision/classification_and_detection/" } tools/make_fake_imagenet.sh ``` @@ -101,9 +102,9 @@ export DATA_DIR=`pwd`/fake_imagenet ## Now run the benchmark on your Arm machine -You can now launch the benchmark on your Arm machine, using the command below: +You can now launch the benchmark on your Arm machine, using the command below: -```bash { env="TF_ENABLE_ONEDNN_OPTS=1;ONEDNN_DEFAULT_FPMATH_MODE=BF16;MODEL_DIR=~/mlperf_inference/vision/classification_and_detection/;DATA_DIR=~/mlperf_inference/vision/classification_and_detection/fake_imagenet", cwd="~/mlperf_inference/vision/classification_and_detection/" } +```bash { env="TF_ENABLE_ONEDNN_OPTS=1 ONEDNN_DEFAULT_FPMATH_MODE=BF16 MODEL_DIR=$HOME/mlperf_inference/vision/classification_and_detection/ DATA_DIR=$HOME/mlperf_inference/vision/classification_and_detection/fake_imagenet", cwd="$HOME/mlperf_inference/vision/classification_and_detection/" } ./run_local.sh tf resnet50 cpu ``` @@ -121,7 +122,7 @@ device is one of [cpu|gpu] For all other options, run help as shown below: -```bash { env="TF_ENABLE_ONEDNN_OPTS=1;ONEDNN_DEFAULT_FPMATH_MODE=BF16;MODEL_DIR=~/mlperf_inference/vision/classification_and_detection/;DATA_DIR=~/mlperf_inference/vision/classification_and_detection/fake_imagenet", cwd="~/mlperf_inference/vision/classification_and_detection/" } +```bash { env="TF_ENABLE_ONEDNN_OPTS=1 ONEDNN_DEFAULT_FPMATH_MODE=BF16 MODEL_DIR=$HOME/mlperf_inference/vision/classification_and_detection DATA_DIR=$HOME/mlperf_inference/vision/classification_and_detection/fake_imagenet", cwd="$HOME/mlperf_inference/vision/classification_and_detection" } ./run_local.sh --help ``` diff --git a/content/learning-paths/servers-and-cloud-computing/mongodb/_index.md b/content/learning-paths/servers-and-cloud-computing/mongodb/_index.md index 02b4f03acd..a529ed6c56 100644 --- a/content/learning-paths/servers-and-cloud-computing/mongodb/_index.md +++ 
b/content/learning-paths/servers-and-cloud-computing/mongodb/_index.md @@ -11,7 +11,7 @@ learning_objectives: - Measure and compare the performance of MongoDB on Arm versus other architectures with Yahoo Cloud Serving Benchmark (YCSB). prerequisites: -- An Arm based instance from a cloud service provider. +- An [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider. armips: - Neoverse diff --git a/content/learning-paths/servers-and-cloud-computing/mongodb/automate_setup_pulumi.md b/content/learning-paths/servers-and-cloud-computing/mongodb/automate_setup_pulumi.md index e522ceae68..95e3778229 100644 --- a/content/learning-paths/servers-and-cloud-computing/mongodb/automate_setup_pulumi.md +++ b/content/learning-paths/servers-and-cloud-computing/mongodb/automate_setup_pulumi.md @@ -20,7 +20,7 @@ Install the python dependencies on your Ubuntu 22.04 machine: sudo apt update sudo apt install python-is-python3 -y sudo apt install python3-pip -y -sudo apt install python3.10-venv +sudo apt install python3.10-venv -y ``` ## Install Pulumi @@ -41,7 +41,7 @@ git clone https://github.com/pbk8s/pulumi-ec2.git ``` ## Build gatord -You would also need the gatord binary for performance analysis. [gator](https://github.com/ARM-software/gator) is a target agent (daemon), part of Arm Streamline, a set of performance analysis tools. Use the following commands to build it from source. +You will also need the gatord binary for performance analysis. [gator](https://github.com/ARM-software/gator) is a target agent (daemon), part of Arm Streamline, a set of performance analysis tools. Use the following commands to build it from source. ```bash git clone https://github.com/ARM-software/gator.git @@ -65,14 +65,14 @@ cp build-native-gcc-rel/gatord ~/pulumi-ec2/ ## Install awscli and set environment variables Use the [awscli](https://learn.arm.com/install-guides/aws-cli/) learning path to install the awscli. -Set the following environment variables on your local computer to connect to your AWS account -```console +Set the following environment variables on your local computer to connect to your AWS account: +```bash export AWS_ACCESS_KEY_ID= export AWS_SECRET_ACCESS_KEY= export AWS_SESSION_TOKEN= ``` -Execute the following command to validate the credentials -```console +Execute the following command to validate the credentials: +```bash aws sts get-caller-identity ``` @@ -134,7 +134,7 @@ subnet = aws.ec2.Subnet("p1-subnet", }) ``` -Note: The security groups created by this script are lot less restrictive, to simplify the deployment process and to remove additional complexities. Please modify the ingress/egress rules as per your organizations' policy. +Note: The security groups created by this script are a lot less restrictive, to simplify the deployment process and to remove additional complexities. Please modify the ingress/egress rules as per your organization's policy. ```python group = aws.ec2.SecurityGroup('p1-security-grouup', diff --git a/content/learning-paths/servers-and-cloud-computing/mongodb/benchmark_mongodb-8.0.md b/content/learning-paths/servers-and-cloud-computing/mongodb/benchmark_mongodb-8.0.md index fd13c2030d..61508cbbb3 100644 --- a/content/learning-paths/servers-and-cloud-computing/mongodb/benchmark_mongodb-8.0.md +++ b/content/learning-paths/servers-and-cloud-computing/mongodb/benchmark_mongodb-8.0.md @@ -8,7 +8,7 @@ weight: 4 # (intro is 1), 2 is first, 3 is second, etc. 
layout: "learningpathall" --- -To further measure the performance of MongoDB, you can run the [Yahoo Cloud Serving Benchmark](http://github.com/brianfrankcooper/YCSB). +To further measure the performance of MongoDB, you can run the [Yahoo Cloud Serving Benchmark](https://github.com/brianfrankcooper/YCSB). YCSB is an open source project which provides the framework and common set of workloads to evaluate the performance of different "key-value" and "cloud" serving stores. Use the steps below to run YCSB to evaluate the performance of MongoDB running on 64-bit Arm machine. @@ -22,13 +22,14 @@ Install the additional software: {{< tab header="Ubuntu" >}} sudo apt install -y maven make gcc {{< /tab >}} - {{< tab header="RHE/Amazon" >}} + {{< tab header="RHEL / Amazon Linux" >}} sudo yum check-update -sudo yum install python2 +# Python 2 may not be available via yum on recent RHEL/Amazon Linux versions. +# If needed, follow the manual installation steps below. {{< /tab >}} {{< /tabpane >}} -For Ubuntu 22.04 and 24.04, Python 2 is not available using the package manager. +For Ubuntu 22.04 and 24.04, Python 2 is not available using the package manager. You can install Python 2.7 using: @@ -38,7 +39,7 @@ wget https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tgz tar xvf Python-2.7.18.tgz cd Python-2.7.18 ./configure --enable-optimizations -make -j $nproc +make -j $(nproc) sudo make altinstall sudo ln -s /usr/local/bin/python2.7 /usr/local/bin/python ``` @@ -61,21 +62,21 @@ cd ycsb-0.17.0 ./bin/ycsb ``` -## Load/Insert Test on MongoDB +## A simple Load/Insert Test on MongoDB -To load and test the performance of loading data(INSERT) into default database `ycsb` at `localhost:27017` where MongoDB is running using the synchronous driver run the following command: +To load and test the performance of loading data(INSERT) into default database `ycsb` at `(localhost/Primary Node):27017` where MongoDB is running using the synchronous driver run the following command: ```console ./bin/ycsb load mongodb -s -P workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb?w=0 -threads 10 ``` -The "-P" parameter is used to load property files. In this example, you used it load the workloada parameter file which sets the recordcount to 1000 in addition to other parameters. The "-threads" parameter indicates the number of threads and is set to 1 by default. +The "-P" parameter is used to load property files. In this example, you used it load the workloada parameter file which sets the recordcount to 1000 in addition to other parameters. The "-threads" parameter indicates the number of client threads (default is 1); this example uses 10 threads. -## Update/Read/Read Modify Write Test on MongoDB +## A simple Update/Read/Read Modify Write Test on MongoDB To test the performance of executing a workload which includes running UPDATE, Read Modify Write(RMW) and/or READ operations on the data using 10 threads for example, use the following command: ```console -./bin/ycsb load mongodb -s -P workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb?w=0 +./bin/ycsb run mongodb -s -P workloads/workloada -p mongodb.url=mongodb://localhost:27017/ycsb?w=0 -threads 10 ``` The workloads/workloada file in this example sets the following values `readproportion=0.5` and `updateproportion=0.5` which means there is an even split between the number of READ and UPDATE operations performed. You can change the type of operations and the splits by providing your own workload parameter file. 
@@ -115,5 +116,5 @@ At the end of each test, statistics are printed to the console. Shown below is t ... ``` -Continue to the next section to run YCSB on a 3 node cluster. +[Continue to the next section to run YCSB on a 3 node cluster.](/learning-paths/servers-and-cloud-computing/mongodb/replica_set_testing) diff --git a/content/learning-paths/servers-and-cloud-computing/mongodb/create_replica_set.md b/content/learning-paths/servers-and-cloud-computing/mongodb/create_replica_set.md new file mode 100644 index 0000000000..dff28104d3 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/mongodb/create_replica_set.md @@ -0,0 +1,182 @@ +--- +# User change +title: "Creating MongoDB test scenarios" + +weight: 3 # (intro is 1), 2 is first, 3 is second, etc. + +# Do not modify these elements +layout: "learningpathall" +--- +## MongoDB test scenarios +To test MongoDB you need two parts. An instance running the testing software ([YCSB](/learning-paths/servers-and-cloud-computing/mongodb/benchmark_mongodb-8.0)). One or more instances running MongoDB in some configuration. The recommended MongoDB test setup is a three node replica set. These three nodes are of equal size with one instance being designated as the primary node (the target for test traffic) and the others as secondary nodes. + +## What is a replica set? + +A replica set is a group of instances that maintain the same dataset. A replica set contains many nodes, but three nodes are the most common for testing. + +## What node size should I use? + +The most common size for testing MongoDB is an 8 vCPU instance. You can test with any sized instance, but if you are looking for ideal testing conditions, 8 vCPUs is enough. Each node should have at least 32GB of RAM. + +To achieve the best results, it's recommended to keep the complete data set in memory. If you see disk access when running tests, increase the RAM size of your instances. Additional details about the recommended configuration are provided below. + +## Creating replica sets + +You can create replica sets of any size (two is the minimum). Three is recommended but you can add as many as you like. + +## Three node replica sets + +To create a three node replica set, start by launching three Arm-based instances of equal size. + +[Install](/learning-paths/servers-and-cloud-computing/mongodb/run_mongodb) MongoDB on all three instances. + +Once all three instances are up and running, modify the service and configuration file for all instances. + +## Modify the MongoDB configuration + +Use a text editor to edit the file `/etc/mongod.conf` and replace the contents of the file with the text below. + +```console +# Configuration Options: https://docs.mongodb.org/manual/reference/configuration-options/ +# Log Messages/Components: https://docs.mongodb.com/manual/reference/log-messages/index.html + +systemLog: + destination: file + logAppend: true + path: /var/log/mongodb/mongodb.log + +storage: + dbPath: /mnt/mongodb # Mounting point selected + engine: wiredTiger + wiredTiger: + engineConfig: + configString: "cache_size=16484MB" # 50% of your ram is recommended. Adding more helps depending on dataset. 
+ +replication: + replSetName: "rs0" # Name of your replica set + oplogSizeMB: 5000 + +# network interfaces +net: + port: 27017 + bindIp: 0.0.0.0 + maxIncomingConnections: 16000 +setParameter: + diagnosticDataCollectionDirectorySizeMB: 400 + honorSystemUmask: false + lockCodeSegmentsInMemory: true + reportOpWriteConcernCountersInServerStatus: true + suppressNoTLSPeerCertificateWarning: true + tlsWithholdClientCertificate: true +``` + +**Details of what these mean are below:** + +**systemLog:** Contains locations and details of where logging should be contained. +- **path:** Location for logging + +**storage:** It's recommended to run test within memory to achieve the best performance. This contains details on the engine used and location of storage. +- **engine:** Wiredtiger is used in this case. Using a disk will add latency. +- **cache_size:** The minimum if using the recommended instance size is 50% of 32(16gb). However, testing showed that using 18GB produced better results. + +**replication:** This is used for replica set setup. +- **replSetName:** This is the name of the replica set. +- **oplogSizeMB:** 5% of the disk size is recommended. + +**net:** Contains details of networking on the node. +- **port:** 27017 is the port used for replica sets +- **maxIncomingConnections:** The maximum number of incoming connections supported by MongoDB + +**setParameter:** Additional options +- **diagnosticDataCollectionDirectorySizeMB:** 400 is based on the docs. +- **honorSystemUmask:** Sets read and write permissions only to the owner of new files +- **lockCodeSegmentsInMemory:** Locks code into memory and prevents it from being swapped. +- **suppressNoTLSPeerCertificateWarning:** Allows clients to connect without a certificate. (Only for testing purposes) +- **tlsWithholdClientCertificate:** Will not send the certificate during communication. (Only for testing purposes) + +If you want to use encryption you will need to add the security and keyFile to your configuration. As well as change some of the parameters in the `mongod.conf` file. + +Run this command to reload the new configuration. + +```bash +sudo systemctl restart mongod +``` + +## Modify the MongoDB service + +Use a text editor to edit the file `/etc/systemd/system/mongod.service` and replace the contents of the file with the text below. + +``` +[Unit] +Description=High-performance, schema-free document-oriented database +After=network.target +Documentation=https://docs.mongodb.org/manual + +[Service] +User=mongodb +Group=mongodb +ExecStart=/usr/bin/mongod --quiet --config /etc/mongod.conf + +# Recommended limits for for mongod as specified in +# https://docs.mongodb.com/manual/reference/ulimit/#recommended-ulimit-settings +# (file size) +LimitFSIZE=infinity +# (cpu time) +LimitCPU=infinity +# (virtual memory size) +LimitAS=infinity +# (locked-in-memory size) +LimitMEMLOCK=infinity +# (open files) +LimitNOFILE=64000 +# (processes/threads) +LimitNPROC=64000 + +[Install] +WantedBy=multi-user.target +``` + +Details on these can be found in the [documentation](https://docs.mongodb.com/manual/reference/ulimit/#recommended-ulimit-settings). + +Run this command to reload the service. + +```bash +sudo systemctl daemon-reload +``` + +**Once all three instances are created and have MongoDB installed, select one to be your primary node. The remaining instances will be secondary nodes.** + +## Initialize the replica set + +Connect to the primary node and run the following commands below. + +1. 
Set variables with the IP addresses of each node: + + ```bash + PRIMARY_NODE_IP="" + SECONDARY_NODE1_IP="" + SECONDARY_NODE2_IP="" + ``` + +2. Initialize the replica set with the following command: + + ```bash + mongosh --host $PRIMARY_NODE_IP:27017 <}} sudo apt install default-jre default-jdk -y {{< /tab >}} -{{< tab header="RHE/Amazon" >}} +{{< tab header="RHEL/Amazon Linux" >}} sudo yum install java-17-openjdk {{< /tab >}} {{< /tabpane >}} @@ -33,37 +33,41 @@ On your instance running MongoDB (you may need to start a new terminal), clone t git clone https://github.com/idealo/mongodb-performance-test.git ``` -Now `cd` into the project folder and execute the `jar` file: +Now change into the project folder and execute the JAR file to see its usage instructions: ```bash { ret_code="1" } cd mongodb-performance-test java -jar ./latest-version/mongodb-performance-test.jar ``` -This will print a description of how to use the java application +This will print a description of how to use the Java application. ## Run Insert test Run a test that inserts documents on `localhost:27017` (default). -Use the following options: - * `-m` defines the test - * `-o` defines the number of iterations - * Alternatively, use `-d` to specify a time limit (in seconds) - * `-t` defines the number of threads - * `-db` defines the database to use - * `-c` defines how the data is collected. - - For example: +First, set an environment variable for the JAR file path for convenience: ```bash { cwd="./mongodb-performance-test" } export jarfile=./latest-version/mongodb-performance-test.jar +``` + +Use the following options: + * `-m` defines the test mode (e.g., `insert`, `update_one`). + * `-o` defines the number of operations (iterations). + * Alternatively, use `-d` to specify a duration limit (in seconds). + * `-t` defines the number of threads. + * `-db` defines the database to use. + * `-c` defines the collection to use. + + For example, run an insert test for 1 million operations using 10 threads: +```bash { cwd="./mongodb-performance-test" } java -jar $jarfile -m insert -o 1000000 -t 10 -db test -c perf ``` -As the test runs, the count will be printed periodically. It will increase until it reaches 1 million and then the test will end. +As the test runs, the progress count will be printed periodically. It will increase until it reaches 1 million, and then the test will end. ## Run Update-one test -Similarly, to run this test, updating one document per query using 10, 20 and finally 30 threads for 1 hour each run (3 hours in total) run the following command: +Similarly, to run an update test (updating one document per query) using 10, 20, and finally 30 threads for 1 hour each (3 hours total), run the following command: ```console java -jar $jarfile -m update_one -d 3600 -t 10 20 30 -db test -c perf @@ -73,7 +77,7 @@ For instructions on running any other tests or more details on the metrics repor ## View the results -During each test, statistics over the last second are printed every second in the console. The following is the output from the end of running Insert test: +During each test, statistics over the last second are printed to the console every second. After the test completes, final summary statistics are displayed. 
The following is example output from the end of the Insert test run: ``` output -- Timers ---------------------------------------------------------------------- @@ -96,4 +100,4 @@ stats-per-run-INSERT 99.9% <= 15.59 milliseconds ``` -The metrics are also output to the `stats-per-second-[mode].csv` which is located in the same folder as the jar file. `[mode]` is the executed mode(s), i.e. either `INSERT`, `UPDATE_ONE`, `UPDATE_MANY`, `COUNT_ONE`, `COUNT_MANY`, `ITERATE_ONE`, `ITERATE_MANY`, `DELETE_ONE` or `DELETE_MANY`. +Detailed per-second metrics are also output to a CSV file named `stats-per-second-[mode].csv` (e.g., `stats-per-second-INSERT.csv`), located in the same folder as the JAR file. `[mode]` corresponds to the executed mode(s), such as `INSERT`, `UPDATE_ONE`, `DELETE_ONE`, etc. diff --git a/content/learning-paths/servers-and-cloud-computing/mongodb/replica.md b/content/learning-paths/servers-and-cloud-computing/mongodb/replica.md deleted file mode 100644 index dd3cb518fd..0000000000 --- a/content/learning-paths/servers-and-cloud-computing/mongodb/replica.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -# User change -title: "Run YCSB using a 3 node replica set" - -draft: true - -weight: 5 # (intro is 1), 2 is first, 3 is second, etc. - -# Do not modify these elements -layout: "learningpathall" ---- - -The recommended MongoDB YCSB test setup is a relica set containing three nodes of equal size. The primary node is the node you send the YCSB traffic to and the others are secondary nodes. - -## What is a replica set? - -A replica set is a group of instances that maintain the same data set. A replica set contains many nodes, but 3 nodes are used for testing. - -## What node size should I use? - -The most common size for testing MongoDB is an 8 vCPU instance. You can test with any sized machine, but if you are looking for ideal testing conditions 8 vCPUs is enough. Each node should have 32GB of RAM. - -You should keep the complete data set in memory. Additional details abut the recommended configuration are provided below. - -## Create a replica set - -Create a 3 node replica set by starting 3 Arm instances with the specifications above. - -Install MongoDB on each node using the previously provided instructions. - -Select 1 instance as the primary node and install YCSB on the instance. - -## Initialize the replica set - -1. Set variables with the IP addresses of each node: - - ```bash - PRIMARY_NODE_IP="" - SECONDARY_NODE1_IP="" - SECONDARY_NODE2_IP="" - ``` - -2. Connect to the primary node using the MongoDB shell: - - ```bash - mongosh --host :27017 - ``` - -3. Initialize the replica set with the following command: - - ```bash - PRIMARY_NODE_IP="" - SECONDARY_NODE1_IP="" - SECONDARY_NODE2_IP="" - - mongosh --host $PRIMARY_NODE_IP:27017 < +This custom PyTorch wheel is prebuilt for Arm CPU architectures and includes the necessary optimizations for running inference. Installing it locally ensures compatibility with your environment and avoids conflicts with default pip packages. +```bash +pip install torch-2.7.0.dev20250306-cp312-cp312-manylinux_2_28_aarch64.whl --force-reinstall --no-deps +``` + +You’re now ready to quantize the model and start serving it with `vLLM` on an Arm-based system. 
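Before moving on, it can help to confirm that this Arm-optimized wheel is the build your environment actually resolves. A minimal check, assuming you run it in the same Python environment used for the install, is:

```bash
# Print the PyTorch version and machine architecture as seen by Python
python -c "import torch, platform; print(torch.__version__, platform.machine())"
```

The version should match the wheel you installed (for example `2.7.0.dev20250306`) and the architecture should report `aarch64`; if either differs, a different PyTorch build is still taking precedence in that environment.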
diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM-quant/2-quantize-model.md b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/2-quantize-model.md new file mode 100644 index 0000000000..7a5c745ecd --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/2-quantize-model.md @@ -0,0 +1,189 @@ +--- +title: Quantize and Launch the vLLM server +weight: 3 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Access the Model from Hugging Face + +Before quantizing, authenticate with Hugging Face using a personal access token. You can generate one from your [Hugging Face Hub](https://huggingface.co/) account under Access Tokens: + +```bash +huggingface-cli login --token $hf_token +``` +## Quantization Script Template + +Using a file editor of your choice, create a file named `vllm_quantize_model.py` and copy the content shown below to quantize the model: +```bash +import argparse +import os +from transformers import AutoModelForCausalLM, AutoTokenizer + +from llmcompressor.modifiers.quantization import QuantizationModifier +from compressed_tensors.quantization import QuantizationScheme +from compressed_tensors.quantization.quant_args import ( + QuantizationArgs, + QuantizationStrategy, + QuantizationType, +) +from llmcompressor.transformers import oneshot + + +def main(): + parser = argparse.ArgumentParser( + description="Quantize a model using LLM Compressor with customizable mode, scheme, and group size." + ) + parser.add_argument( + "model_id", + type=str, + help="Model identifier or path (e.g., 'meta-llama/Llama-2-13b-chat-hf' or '/path/to/model')", + ) + parser.add_argument( + "--mode", + type=str, + choices=["int4", "int8"], + required=True, + help="Quantization mode: int4 or int8", + ) + parser.add_argument( + "--scheme", + type=str, + choices=["channelwise", "groupwise"], + required=True, + help="Quantization scheme for weights (groupwise is only supported for int4)", + ) + parser.add_argument( + "--groupsize", + type=int, + default=32, + help="Group size for groupwise quantization (only used when scheme is groupwise). Defaults to 32." + ) + args = parser.parse_args() + + # Validate unsupported configuration + if args.mode == "int8" and args.scheme == "groupwise": + raise ValueError("Groupwise int8 is unsupported. Please use channelwise for int8.") + + # Extract a base model name from the model id or path for the output directory + if "/" in args.model_id: + base_model_name = args.model_id.split("/")[-1] + else: + base_model_name = os.path.basename(args.model_id) + + # Determine output directory based on mode and scheme + if args.mode == "int4": + output_dir = f"{base_model_name}-w4a8-{args.scheme}" + else: # int8 + output_dir = f"{base_model_name}-w8a8-{args.scheme}" + + print(f"Loading model '{args.model_id}'...") + model = AutoModelForCausalLM.from_pretrained( + args.model_id, device_map="auto", torch_dtype="auto", trust_remote_code=True + ) + tokenizer = AutoTokenizer.from_pretrained(args.model_id) + + # Define quantization arguments based on mode and chosen scheme. + if args.mode == "int8": + # Only channelwise is supported for int8. 
+ weights_args = QuantizationArgs( + num_bits=8, + type=QuantizationType.INT, + strategy=QuantizationStrategy.CHANNEL, + symmetric=True, + dynamic=False, + ) + else: # int4 mode + if args.scheme == "channelwise": + strategy = QuantizationStrategy.CHANNEL + weights_args = QuantizationArgs( + num_bits=4, + type=QuantizationType.INT, + strategy=strategy, + symmetric=True, + dynamic=False, + ) + else: # groupwise + strategy = QuantizationStrategy.GROUP + weights_args = QuantizationArgs( + num_bits=4, + type=QuantizationType.INT, + strategy=strategy, + group_size=args.groupsize, + symmetric=True, + dynamic=False + ) + + # Activation quantization remains the same for both modes. + activations_args = QuantizationArgs( + num_bits=8, + type=QuantizationType.INT, + strategy=QuantizationStrategy.TOKEN, + symmetric=False, + dynamic=True, + observer=None, + ) + + # Create a quantization scheme for Linear layers. + scheme = QuantizationScheme( + targets=["Linear"], + weights=weights_args, + input_activations=activations_args, + ) + + # Create a quantization modifier. We ignore the "lm_head" layer. + modifier = QuantizationModifier(config_groups={"group_0": scheme}, ignore=["lm_head"]) + + # Apply quantization and save the quantized model. + oneshot( + model=model, + recipe=modifier, + tokenizer=tokenizer, + output_dir=output_dir, + ) + print(f"Quantized model saved to: {output_dir}") + + +if __name__ == "__main__": + main() + + +``` +Then run the quantization script using `vllm_quantize_model.py`. This generates an INT8 quantized version of the model using channelwise precision, which reduces memory usage while maintaining model accuracy: + +```bash +python vllm_quantize_model.py meta-llama/Llama-3.1-8B-Instruct --mode int8 --scheme channelwise +``` +The quantized model will be saved at: +`$HOME/Llama-3.1-8B-Instruct-w8a8-channelwise`. + +## Launch the vLLM server + +The vLLM server supports the OpenAI-compatible `/v1/chat/completions` API. This is used in this learning path for single-prompt testing with `curl` and for batch testing using a custom Python script that simulates multiple concurrent requests. + +Once the model is quantized, launch the vLLM server to enable CPU-based inference. This configuration uses `tcmalloc` and the optimized `OpenBLAS` build to improve performance and reduce latency: + +```bash +LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libtcmalloc_minimal.so.4:/home/ubuntu/OpenBLAS/libopenblas.so \ +ONEDNN_DEFAULT_FPMATH_MODE=BF16 \ +VLLM_TARGET_DEVICE=cpu \ +VLLM_CPU_KVCACHE_SPACE=32 \ +VLLM_CPU_OMP_THREADS_BIND="0-$(($(nproc) - 1))" \ +vllm serve $HOME/Llama-3.1-8B-Instruct-w8a8-channelwise \ +--dtype float32 --swap-space 16 +``` +This command starts the vLLM server using the quantized model. It preloads `tcmalloc` for efficient memory allocation and uses OpenBLAS for accelerated matrix operations. Thread binding is dynamically set based on the number of available cores to maximize parallelism on Arm CPUs. + +The output from launching the vLLM server with the quantized model should look like: + +```output +INFO 04-23 21:13:59 launcher.py:31] Route: /rerank, Methods: POST +INFO 04-23 21:13:59 launcher.py:31] Route: /v1/rerank, Methods: POST +INFO 04-23 21:13:59 launcher.py:31] Route: /v2/rerank, Methods: POST +INFO 04-23 21:13:59 launcher.py:31] Route: /invocations, Methods: POST +INFO: Started server process [77356] +INFO: Waiting for application startup. +INFO: Application startup complete. 
+``` + diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM-quant/3-run-benchmark.md b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/3-run-benchmark.md new file mode 100644 index 0000000000..7dab421f50 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/3-run-benchmark.md @@ -0,0 +1,195 @@ +--- +title: vLLM Inference Test +weight: 4 + +### FIXED, DO NOT MODIFY +layout: learningpathall +--- + +## Run Single Inference + +Once the server is running, open another terminal and verify it is running as expected with a basic single-prompt request using `curl`. This confirms the server is running correctly and that the OpenAI-compatible /v1/chat/completions API is responding as expected: + +```bash +curl http://localhost:8000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "/home/ubuntu/Llama-3.1-8B-Instruct-w8a8-channelwise", + "temperature": "0.0", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "tell me a funny story"} + ] + }' +``` +If the setup is working correctly, you'll receive a streaming response from the vLLM server. + +The server logs will show that the request was processed successfully. You'll also see prompt and generation throughput metrics, which provide a lightweight benchmark of the model’s performance in your environment. + +The following log output was generated from a single-prompt test run using the steps in this learning path: + +```output +INFO: Started server process [201749] +INFO: Waiting for application startup. +INFO: Application startup complete. +INFO 04-10 18:13:14 chat_utils.py:332] Detected the chat template content format to be 'string'. You can set `--chat-template-content-format` to override this. +INFO 04-10 18:13:14 logger.py:39] Received request chatcmpl-a71fae48603c4d90a5d9aa6efd740fec: prompt: '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nCutting Knowledge Date: December 2023\nToday Date: 26 Jul 2024\n\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\ntell me a funny story<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n', params: SamplingParams(n=1, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, temperature=0.0, top_p=1.0, top_k=-1, min_p=0.0, seed=None, stop=[], stop_token_ids=[], bad_words=[], include_stop_str_in_output=False, ignore_eos=False, max_tokens=131026, min_tokens=0, logprobs=None, prompt_logprobs=None, skip_special_tokens=True, spaces_between_special_tokens=True, truncate_prompt_tokens=None, guided_decoding=None), prompt_token_ids: None, lora_request: None, prompt_adapter_request: None. +INFO 04-10 18:13:14 engine.py:275] Added request chatcmpl-a71fae48603c4d90a5d9aa6efd740fec. +WARNING 04-10 18:13:15 cpu.py:143] Pin memory is not supported on CPU. +INFO 04-10 18:13:17 metrics.py:455] Avg prompt throughput: 9.2 tokens/s, Avg generation throughput: 11.6 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.1%, CPU KV cache usage: 0.0%. +INFO 04-10 18:13:22 metrics.py:455] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 27.0 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.2%, CPU KV cache usage: 0.0%. +INFO 04-10 18:13:27 metrics.py:455] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 26.5 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.3%, CPU KV cache usage: 0.0%. 
+INFO: 127.0.0.1:45986 - "POST /v1/chat/completions HTTP/1.1" 200 OK + +``` + +These results confirm that the model is running efficiently on CPU, with stable prompt and generation throughput — a solid baseline before scaling to batch inference. + +## Run Batch Inference + +After confirming single-prompt inference, run batch testing to simulate concurrent load and measure server performance at scale. + +Use the following Python script to simulate concurrent user interactions. + +Save the content shown below in a file named `batch_test.py`: +```python +import requests +import json +import os +import time +import multiprocessing +import argparse + +class bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKCYAN = '\033[96m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + +# prompts (duplicate questions) +# https://github.com/ggml-org/llama.cpp/blob/b4753/examples/parallel/parallel.cpp#L42-L52 +prompts = [ + #"Tell me a joke about AI.", + "What is the meaning of life?", + "Tell me an interesting fact about llamas.", + "What is the best way to cook a steak?", + "Are you familiar with the Special Theory of Relativity and can you explain it to me?", + "Recommend some interesting books to read.", + "What is the best way to learn a new language?", + "How to get a job at Google?", + "If you could have any superpower, what would it be?", + "I want to learn how to play the piano.", + "What is the meaning of life?", + "Tell me an interesting fact about llamas.", + "What is the best way to cook a steak?", + "Are you familiar with the Special Theory of Relativity and can you explain it to me?", + "Recommend some interesting books to read.", + "What is the best way to learn a new language?", + "How to get a job at Google?", +] + +def get_stream(url, prompt, index): + s = requests.Session() + print(bcolors.OKGREEN, "Sending request #{}".format(index), bcolors.ENDC) + with s.post(url, headers=None, json=prompt, stream=True) as resp: + print(bcolors.WARNING, "Waiting for the reply #{} to the prompt '".format(index) + prompt["messages"][0]["content"] + "'", bcolors.ENDC) + for line in resp.iter_lines(): + if line: + print(line) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # this is a mandatory parameter + parser.add_argument("server", help="llama server IP ir DNS address", type=str) + parser.add_argument("port", help="llama server port", type=int) + parser.add_argument("-s", "--stream", help="stream the reply", action="store_true") + parser.add_argument("-b", "--batch", help="concurrent request batch size", type=int, default=1) + parser.add_argument("--max_tokens", help="maximum output tokens", type=int, default=128) + parser.add_argument("--schema", help="enndpoint schema (http/https)", type=str, default="http", choices=["http", "https"]) + parser.add_argument("-m", "--model", help="model name", type=str) + args = parser.parse_args() + + # by default, OpenAI-compatible API is used for the tests, which is supported by both llama.cpp and vllm + openAPI_endpoint = "/v1/chat/completions" + server = args.schema + "://" + args.server + ":" + str(args.port) + openAPI_endpoint + + print(server) + start = time.time() + + proc = [] + for i in range(args.batch): + prompt = { + "messages": [ + {"role": "user", "content": prompts[i]} + ], + "model": args.model, + "temperature": 0, + "max_tokens": args.max_tokens, # for vllm, it ignores n_predict + "n_predict": args.max_tokens, # for llama.cpp (will be ignored by 
vllm) + "stream": False # streaming + } + + proc.append(multiprocessing.Process(target=get_stream, args=(server, prompt, i))) + + # start the processes + for p in proc: + p.start() + + # wait for all the processes to finish + for p in proc: + p.join() + + end = time.time() + print("done!") + print(end - start) +``` +Then, run it using: + +```bash +python3 batch_test.py localhost 8000 --schema http --batch 16 -m $HOME/Llama-3.1-8B-Instruct-w8a8-channelwise +``` +This simulates multiple users interacting with the model in parallel and helps validate server-side performance under load. +You can modify the number of requests using the --batch flag or review and edit `batch_test.py` to customize prompt content and concurrency logic. + +When the test completes, server logs will display a summary including average prompt throughput and generation throughput. This helps benchmark how well the model performs under concurrent load on your Arm-based system. + +### Sample Output +Your logs should display successful responses and performance stats, confirming the model handles concurrent requests as expected. + +The following log output was generated from a batch inference run using the steps in this learning path: + +```output +INFO 04-10 18:20:55 metrics.py:455] Avg prompt throughput: 144.4 tokens/s, Avg generation throughput: 153.4 tokens/s, Running: 16 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 1.2%, CPU KV cache usage: 0.0%. +INFO 04-10 18:21:00 metrics.py:455] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 239.9 tokens/s, Running: 16 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 2.1%, CPU KV cache usage: 0.0%. +INFO: 127.0.0.1:57558 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57574 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57586 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57600 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57604 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57620 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57634 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57638 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57644 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57654 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57660 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57676 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57684 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57696 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57712 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO: 127.0.0.1:57718 - "POST /v1/chat/completions HTTP/1.1" 200 OK +INFO 04-10 18:21:10 metrics.py:455] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 7.7 tokens/s, Running: 0 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.0%, CPU KV cache usage: 0.0 +``` + +This output confirms the server is handling concurrent requests effectively, with consistent generation throughput across 16 requests — a strong indication of stable multi-request performance on CPU. + +### Go Beyond: Power Up Your vLLM Workflow +Now that you’ve successfully quantized and served a model using vLLM on Arm, here are some further ways to explore: + +* **Try different models:** Apply the same steps to other [Hugging Face models](https://huggingface.co/models) like Qwen or Gemma. 
+ +* **Connect a chat client:** Link your server with OpenAI-compatible UIs like [Open WebUI](https://github.com/open-webui/open-webui) or explore [OpenAI-compatible clients](https://github.com/topics/openai-api-client). diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_index.md b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_index.md new file mode 100644 index 0000000000..94a73e92e0 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_index.md @@ -0,0 +1,64 @@ +--- +title: Quantize and Run a Large Language Model using vLLM on Arm Servers + +draft: true +cascade: + draft: true + +minutes_to_complete: 45 + +who_is_this_for: This learning path is intended for software developers and AI engineers interested in optimizing and deploying large language models using vLLM on Arm-based servers. It’s ideal for those looking to explore CPU-based inference and model quantization techniques. + +learning_objectives: + - Build and configure OpenBLAS to optimize LLM performance. + - Set up vLLM and PyTorch using builds optimized for Arm CPUs. + - Download and quantize a large language model using INT8 techniques. + - Launch a vLLM server to serve the quantized model. + - Run single-prompt and batch inference using the vLLM OpenAI-compatible API. + + +prerequisites: + - An Arm-based server or cloud instance running with at least 32 CPU cores, 64 GB RAM and 32 GB of available disk space. + - Familiarity with Python and machine learning concepts. + - An active Hugging Face account with access to the target model. + +author: + - Rani Chowdary Mandepudi + - Phalani Paladugu + +### Tags +skilllevels: Introductory +subjects: ML +armips: + - Neoverse +operatingsystems: + - Linux +tools_software_languages: + - vLLM + - LLM + - GenAI + - Python + + +further_reading: + - resource: + title: vLLM Documentation + link: https://docs.vllm.ai/ + type: documentation + - resource: + title: vLLM GitHub Repository + link: https://github.com/vllm-project/vllm + type: github + - resource: + title: Hugging Face Model Hub + link: https://huggingface.co/models + type: website + + + +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. +--- diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/vLLM-quant/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. 
+--- diff --git a/content/learning-paths/servers-and-cloud-computing/vectorscan/_index.md b/content/learning-paths/servers-and-cloud-computing/vectorscan/_index.md index ec9552d526..8e738ef3aa 100644 --- a/content/learning-paths/servers-and-cloud-computing/vectorscan/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/vectorscan/_index.md @@ -4,7 +4,7 @@ title: Install Vectorscan (Hyperscan on Arm) and use it with Snort 3 minutes_to_complete: 15 who_is_this_for: This is an introductory topic for software developers using Hyperscan who want to migrate to Arm. - + learning_objectives: - Install and run Vectorscan on an Arm-based instance @@ -22,7 +22,7 @@ subjects: Libraries armips: - Neoverse tools_software_languages: - - Vectorscan + - Vectorscan operatingsystems: - Linux @@ -30,8 +30,6 @@ test_images: - ubuntu:latest test_link: https://github.com/armflorentlebeau/arm-learning-paths/actions/runs/4312122327 test_maintenance: true -test_status: -- passed further_reading: - resource: diff --git a/content/learning-paths/servers-and-cloud-computing/vectorscan/install.md b/content/learning-paths/servers-and-cloud-computing/vectorscan/install.md index 06d9b45225..b51267c4c7 100644 --- a/content/learning-paths/servers-and-cloud-computing/vectorscan/install.md +++ b/content/learning-paths/servers-and-cloud-computing/vectorscan/install.md @@ -16,13 +16,13 @@ This Learning Path explains how to use Vectorscan on Arm and provides an example ## Before you begin -You should have an Arm server available with Ubuntu 20.04 or Ubuntu 22.04 installed. +You should have an Arm server available with Ubuntu 20.04 or Ubuntu 22.04 installed. The instructions provided have been tested on an Ubuntu 22.04 AWS Arm EC2 instance (c6g.xlarge) and Ubuntu 20.04 Oracle Ampere A1 instance. ### Software dependencies -Before building Vectorscan, install the following software. +Before building Vectorscan, install the following software. Update the sources list for the package manager. @@ -33,12 +33,12 @@ sudo apt update GCC for your Arm Linux distribution. If needed, refer to the [installation guide](/install-guides/gcc/native/). ```bash -sudo apt install -y build-essential +sudo apt install -y build-essential ``` [CMake build system](https://cmake.org/): -```bash +```bash sudo apt install -y cmake ``` @@ -83,9 +83,9 @@ cd vectorscan ## Determine if your processor has SVE -[Scalable Vector Extensions (SVE)](https://developer.arm.com/Architectures/Scalable%20Vector%20Extensions) is a SIMD extension of the Arm architecture which is available on some Arm processors. For example, the Neoverse-N1 does not include SVE and the Neoverse-V1 does include SVE. +[Scalable Vector Extensions (SVE)](https://developer.arm.com/Architectures/Scalable%20Vector%20Extensions) is a SIMD extension of the Arm architecture which is available on some Arm processors. For example, the Neoverse-N1 does not include SVE and the Neoverse-V1 does include SVE. -Vectorscan will run faster if you have an processor with SVE and you enable it when building the software. +Vectorscan will run faster if you have an processor with SVE and you enable it when building the software. 
To determine if SVE is available on your processor run: @@ -93,15 +93,15 @@ To determine if SVE is available on your processor run: lscpu | grep sve ``` -If SVE is available the Flags will be printed: +If SVE is available the Flags will be printed: ```output Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm jscvt fcma lrcpc dcpop sha3 sm3 sm4 asimddp sha512 sve asimdfhm dit uscat ilrcpc flagm ssbs paca pacg dcpodp svei8mm svebf16 i8mm bf16 dgh rng ``` -If no SVE is present, there will be no output. +If no SVE is present, there will be no output. -### Build Vectorscan +### Build Vectorscan Create a build directory and build with cmake: @@ -117,7 +117,7 @@ cmake -DBUILD_SVE=1 ../ Use `make` to build the vectorscan library: -```bash { cwd="./vectorscan/vectorscan-build" } +```bash { cwd="./vectorscan/build" } make -j$(nproc) ``` @@ -127,8 +127,8 @@ The executables from the build are created in the `bin` directory. Run a check to validate that `Vectorscan` is built and running correctly: -```bash { cwd="./vectorscan/vectorscan-build" } -./bin/unit-hyperscan +```bash { cwd="./vectorscan/build" } +ls bin && ./bin/unit-hyperscan ``` All the unit tests should run successfully. At the end of execution you will see output similar to: diff --git a/content/learning-paths/servers-and-cloud-computing/vectorscan/snort.md b/content/learning-paths/servers-and-cloud-computing/vectorscan/snort.md index 38f4612614..d7baf29a04 100644 --- a/content/learning-paths/servers-and-cloud-computing/vectorscan/snort.md +++ b/content/learning-paths/servers-and-cloud-computing/vectorscan/snort.md @@ -15,16 +15,16 @@ You can install Snort 3 on an Ubuntu Linux Arm-based server, and run it with Vec ## Before you begin -You should already have an Arm server running Ubuntu Linux from the previous topic. +You should already have an Arm server running Ubuntu Linux from the previous topic. Install the Snort 3 dependencies: ```bash -sudo apt update +sudo apt update sudo apt-get install -y build-essential autotools-dev libdumbnet-dev libluajit-5.1-dev libpcap-dev \ zlib1g-dev pkg-config libhwloc-dev cmake liblzma-dev openssl libssl-dev cpputest libsqlite3-dev \ libtool uuid-dev git autoconf bison flex libcmocka-dev libnetfilter-queue-dev libunwind-dev \ -libmnl-dev ethtool libjemalloc-dev ragel +libmnl-dev ethtool libjemalloc-dev ragel libfl-dev ``` ## Download and install other required software @@ -75,26 +75,26 @@ Download (but do not build) [Boost C++ Libraries](https://www.boost.org/): ```bash cd ~/snort_src -wget https://boostorg.jfrog.io/artifactory/main/release/1.85.0/source/boost_1_85_0.tar.gz -tar -xvzf boost_1_85_0.tar.gz +wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz +tar -xvzf boost_1_87_0.tar.gz ``` Download Vectorscan: ```bash cd ~/snort_src -git clone https://github.com/VectorCamp/vectorscan -cd vectorscan -cd .. -mkdir hyperscan-build -cd hyperscan-build +git clone https://github.com/VectorCamp/vectorscan +cd vectorscan +cd .. 
+mkdir hyperscan-build +cd hyperscan-build ``` Configure and build Vectorscan: ```bash { cwd="snort_src/hyperscan-build" } cmake -DCMAKE_INSTALL_PREFIX=/usr/local -DBOOST_ROOT=~/snort_src/boost_1_85_0/ ~/snort_src/vectorscan/ -make -j$(nproc) && sudo make install +make -j$(nproc) && sudo make install ``` Install [FlatBuffers](https://google.github.io/flatbuffers/): @@ -138,7 +138,7 @@ cd ~/snort_src wget https://github.com/snort3/snort3/archive/refs/tags/3.2.2.0.tar.gz -O snort3-3.2.2.0.tar.gz tar -xzvf snort3-3.2.2.0.tar.gz cd snort3-3.2.2.0 -./configure_cmake.sh --prefix=/usr/local --enable-tcmalloc +./configure_cmake.sh --prefix=/usr/local --enable-tcmalloc cd build make -j$(nproc) sudo make install @@ -146,7 +146,7 @@ sudo make install ## Confirm Snort 3 is installed and running properly -Snort 3 should be installed in `/usr/local/bin`. +Snort 3 should be installed in `/usr/local/bin`. Verify it is installed and running correctly by printing the version: @@ -178,7 +178,7 @@ You should see output similar to the following: You can test the performance of Snort 3 with Vectorscan on your Arm instance. -Download a capture file to using for testing: +Download a capture file to using for testing: ```bash mkdir ~/snort3_test diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM/_index.md b/content/learning-paths/servers-and-cloud-computing/vllm/_index.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/vLLM/_index.md rename to content/learning-paths/servers-and-cloud-computing/vllm/_index.md diff --git a/content/learning-paths/servers-and-cloud-computing/vllm/_next-steps.md b/content/learning-paths/servers-and-cloud-computing/vllm/_next-steps.md new file mode 100644 index 0000000000..c3db0de5a2 --- /dev/null +++ b/content/learning-paths/servers-and-cloud-computing/vllm/_next-steps.md @@ -0,0 +1,8 @@ +--- +# ================================================================================ +# FIXED, DO NOT MODIFY THIS FILE +# ================================================================================ +weight: 21 # Set to always be larger than the content in this path to be at the end of the navigation. +title: "Next Steps" # Always the same, html page title. +layout: "learningpathall" # All files under learning paths have this same wrapper for Hugo processing. 
+--- diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM/vllm-run.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/vLLM/vllm-run.md rename to content/learning-paths/servers-and-cloud-computing/vllm/vllm-run.md diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM/vllm-server.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/vLLM/vllm-server.md rename to content/learning-paths/servers-and-cloud-computing/vllm/vllm-server.md diff --git a/content/learning-paths/servers-and-cloud-computing/vLLM/vllm-setup.md b/content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md similarity index 100% rename from content/learning-paths/servers-and-cloud-computing/vLLM/vllm-setup.md rename to content/learning-paths/servers-and-cloud-computing/vllm/vllm-setup.md diff --git a/content/learning-paths/servers-and-cloud-computing/zlib/_index.md b/content/learning-paths/servers-and-cloud-computing/zlib/_index.md index b55a0f81b6..c590d14147 100644 --- a/content/learning-paths/servers-and-cloud-computing/zlib/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/zlib/_index.md @@ -1,29 +1,36 @@ --- -armips: -- Neoverse -author: Jason Andrews -layout: learningpathall +title: Learn how to build and use Cloudflare zlib on Arm servers + +minutes_to_complete: 30 + +who_is_this_for: This is an introductory topic for software developers to learn how + to to build and use Cloudflare zlib on Arm servers. + learning_objectives: - Build zlib-cloudflare on an Arm server - Run example applications using zlib - Measure and analyze performance to see improvements -learning_path_main_page: 'yes' -minutes_to_complete: 30 -operatingsystems: -- Linux + prerequisites: - An Arm Linux computer or an [Arm based instance](/learning-paths/servers-and-cloud-computing/csp/) from a cloud service provider running Ubuntu 20.04 or Ubuntu 22.04. -skilllevels: Introductory -subjects: Libraries + +author: Jason Andrews + test_images: - ubuntu:latest -test_link: +test_link: test_maintenance: true -test_status: -- passed -title: Learn how to build and use Cloudflare zlib on Arm servers + +### Tags +armips: +- Neoverse +skilllevels: Introductory +subjects: Libraries +operatingsystems: +- Linux tools_software_languages: - zlib + further_reading: - resource: title: Cloudflare zlib on GitHub @@ -34,9 +41,9 @@ further_reading: link: https://aws.amazon.com/blogs/opensource/improving-zlib-cloudflare-and-comparing-performance-with-other-zlib-forks/ type: blog - - -weight: 1 -who_is_this_for: This is an introductory topic for software developers to learn how - to to build and use Cloudflare zlib on Arm servers. +### FIXED, DO NOT MODIFY +# ================================================================================ +weight: 1 # _index.md always has weight of 1 to order correctly +layout: "learningpathall" # All files under learning paths have this same wrapper +learning_path_main_page: "yes" # This should be surfaced when looking for related content. Only set for _index.md of learning path content. 
 ---
diff --git a/content/learning-paths/servers-and-cloud-computing/zlib/perf.md b/content/learning-paths/servers-and-cloud-computing/zlib/perf.md
index 48941244ef..ab2c568021 100644
--- a/content/learning-paths/servers-and-cloud-computing/zlib/perf.md
+++ b/content/learning-paths/servers-and-cloud-computing/zlib/perf.md
@@ -28,8 +28,8 @@ The previous section explained how to run a Python program to compress large fil
 
 Continue with the same `zip.py` program as the previous section. Make sure to start with `zip.py` and `largefile` available. Confirm the application is working and `largefile.gz` is created when it is run.
 
-```bash
-python ./zip.py
+```console
+python zip.py
 ```
 
 ## Run the example with perf using the default zlib
@@ -40,7 +40,7 @@ Run with the default `zlib` and time the execution.
 perf stat python ./zip.py
 ```
 
-The `perf stat` command will display counts of a few selected PMU events.
+The `perf stat` command will display counts of a few selected PMU events.
 
 ## Use perf record and generate the flame graph
 
@@ -50,7 +50,7 @@ You can also record the application activity with `perf record`. `-F` specifies
 perf record -F 99 -g python ./zip.py
 ```
 
-To visualize the results, you can generate an image with `FlameGraph`. Install it with:
+To visualize the results, you can generate an image with `FlameGraph`. Install it with:
 
 ```bash
 sudo apt install git -y
@@ -77,9 +77,9 @@ Note that the `zlib` and the `deflate` function are taking significant time.
 
 ## Run the example again with perf stat and zlib-cloudflare
 
-This time use `LD_PRELOAD` to change to `zlib-cloudflare` instead and check the performance difference.
+This time use `LD_PRELOAD` to change to `zlib-cloudflare` instead and check the performance difference.
 
-Adjust the path to the Cloudflare `libz.so` as needed.
+Adjust the path to the Cloudflare `libz.so` as needed.
 
 ```console
 LD_PRELOAD=/usr/local/lib/libz.so perf stat python ./zip.py
diff --git a/content/learning-paths/servers-and-cloud-computing/zlib/py-zlib.md b/content/learning-paths/servers-and-cloud-computing/zlib/py-zlib.md
index 157c030cb8..1dcf263b61 100644
--- a/content/learning-paths/servers-and-cloud-computing/zlib/py-zlib.md
+++ b/content/learning-paths/servers-and-cloud-computing/zlib/py-zlib.md
@@ -6,7 +6,7 @@ weight: 3
 
 ## Install necessary software packages
 
-* Make sure `python3` is available when `python` is run.
+* Make sure `python3` is available when `python` is run.
 
 ```bash
 sudo apt install python-is-python3 -y
@@ -14,7 +14,7 @@ sudo apt install python-is-python3 -y
 
 ## Detailed Steps
 
-The previous section explained how to build the Cloudflare `zlib` which includes the use of `crc32` instructions to improve performance on data compression.
+The previous section explained how to build the Cloudflare `zlib` which includes the use of `crc32` instructions to improve performance on data compression.
 
 Use a Python example and measure the performance difference with `zlib-cloudflare`.
 
@@ -48,16 +48,16 @@ dd if=/dev/zero of=largefile count=1M bs=1024
 Run with the default `zlib` and time the execution.
 
 ```bash
-time python ./zip.py
+time python zip.py
 ```
 
-Make a note of how many seconds the program took.
+Make a note of how many seconds the program took.
 
 ## Run the example again with zlib-cloudflare
 
-This time, use `LD_PRELOAD` to change to `zlib-cloudflare` instead and check the performance difference.
+This time, use `LD_PRELOAD` to change to `zlib-cloudflare` instead and check the performance difference.
 
-Adjust the path to `libz.so` as needed.
+Adjust the path to `libz.so` as needed.
 
 ```bash
 time LD_PRELOAD=/usr/local/lib/libz.so python ./zip.py
diff --git a/data/stats_current_test_info.yml b/data/stats_current_test_info.yml
index 1e13adff55..ff89188386 100644
--- a/data/stats_current_test_info.yml
+++ b/data/stats_current_test_info.yml
@@ -111,6 +111,7 @@ sw_categories:
     readable_title: GFortran
     tests_and_status:
     - ubuntu:latest: passed
+    - fedora:latest: passed
   go:
     readable_title: Go
     tests_and_status: []
diff --git a/data/stats_weekly_data.yml b/data/stats_weekly_data.yml
index c17633959e..c9a26074a7 100644
--- a/data/stats_weekly_data.yml
+++ b/data/stats_weekly_data.yml
@@ -5674,6 +5674,7 @@
   pranay-bakre: 5
   preema-merlin-dsouza: 1
   przemyslaw-wirkus: 2
+  rani-chowdary-mandepudi: 1
   ravi-malhotra: 1
   rin-dobrescu: 1
   roberto-lopez-mendez: 2
@@ -5776,10 +5777,10 @@
   pranay-bakre: 5
   preema-merlin-dsouza: 1
   przemyslaw-wirkus: 2
+  rani-chowdary-mandepudi: 1
   ravi-malhotra: 1
   rin-dobrescu: 1
   roberto-lopez-mendez: 2
-  ronan-synnott: 45
   shuheng-deng: 1
   thirdai: 1
   tianyu-li: 1
diff --git a/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html b/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
index 3d2d0da93a..ca5cc446bf 100644
--- a/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
+++ b/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
@@ -9,6 +9,10 @@
     {{partial "head/analytics.html" .}}
     {{partial "head/conditional-redirect.html" .}}
     {{partial "head/hotjar-surveys.html" .}}
+
+    {{ if .Param "math" }}
+      {{ partialCached "math.html" . }}
+    {{ end }}
 
 
     {{partial "header/nav-masthead.html" .}}
diff --git a/themes/arm-design-system-hugo-theme/layouts/partials/math.html b/themes/arm-design-system-hugo-theme/layouts/partials/math.html
new file mode 100644
index 0000000000..1f74076371
--- /dev/null
+++ b/themes/arm-design-system-hugo-theme/layouts/partials/math.html
@@ -0,0 +1,12 @@
+
+
\ No newline at end of file
diff --git a/tools/check.py b/tools/check.py
index ff1e940da8..0fb93b39d2 100644
--- a/tools/check.py
+++ b/tools/check.py
@@ -85,9 +85,9 @@ def write_commands_to_file(test_cmd_filename, test):
    # - Working directory is specified
    # - An environment variable is specified
    cmd_args = {
-            "env_source":"source",
+            "env_source":".",
             "cwd":"cd ",
-            "env":"export"
+            "env=":"export"
             }
    for cmd_arg in cmd_args.keys():
        if cmd_arg in test:
@@ -153,6 +153,11 @@ def check(json_file, start, stop, md_article):
    test_images = data["test_images"]
    for n_image, test_image in zip(range(0, len(test_images)), test_images):
        logging.info(f"--- Testing on {test_image} ---")
+
+        if test_image != "ubuntu:latest":
+            container_name = init_container(i_img=n_image, img=test_image)
+            logging.info(f"{container_name} initialized")
+
        with alive_progress.alive_bar(data["ntests"], title=test_image, stats=False) as bar:
            for n_test in range(0, data["ntests"]):
                if dictionary_lookup(data, f"{n_test}"):
@@ -161,9 +166,10 @@ def check(json_file, start, stop, md_article):
                        logging.info(f"Error getting test from JSON file, skipping")
                        continue
 
-                    test_target = test.get("target")
+                    test_target = test.get("target") or ""
                    if test_target and test_target != test_image:
-                        pass
+                        bar(skipped=True)
+                        continue
                    elif not test_target:
                        pass
                    elif test_target:
@@ -184,12 +190,23 @@ def check(json_file, start, stop, md_article):
                    test_type = test["type"]
                    # Check type
                    if test_type == "bash":
-                        # chmod cmd file
-                        run_command = [f"chmod +x {test_cmd_filename}"]
-                        subprocess.run(run_command, shell=True, capture_output=True)
-                        logging.debug(run_command)
-                        # execute file as is with bash
-                        run_command = [f"bash ./{test_cmd_filename}"]
+                        if "ubuntu" in test_image:
+                            # chmod cmd file
+                            run_command = [f"chmod +x {test_cmd_filename}"]
+                            subprocess.run(run_command, shell=True, capture_output=True)
+                            logging.debug(run_command)
+                            # execute file as is with bash
+                            run_command = [f"bash ./{test_cmd_filename}"]
+                        elif "fedora" in test_target:
+                            # copy files to docker
+                            docker_cmd = [f"docker cp {test_cmd_filename} test_{n_image}:/home/{username}/"]
+                            subprocess.run(docker_cmd, shell=True, capture_output=True)
+                            logging.debug(docker_cmd)
+                            run_command = [f"docker exec -u {username} -w /home/{username} test_{n_image} bash {test_cmd_filename}"]
+                        else:
+                            logging.debug(f"Image {test_image} not supported for testing. Contact the maintainers if you think this is a mistake.")
+                            bar(skipped=True)
+                            continue
                    elif test_type == "fvp":
                        # Start instance for image
                        if start:
@@ -283,12 +300,18 @@
    if paths_to_remove:
        logging.info(f"Removing files that were created during testing from repository")
        for path in paths_to_remove:
-            if os.path.isfile(path) or os.path.islink(path):
-                os.remove(path)
+            try:
+
+                if os.path.isfile(path) or os.path.islink(path):
+                    os.chmod(path, 0o777)
+                    os.remove(path)
+
 
-            elif os.path.isdir(path):
-                shutil.rmtree(path)
-                logging.debug(f"Removed {path}")
+                elif os.path.isdir(path):
+                    shutil.rmtree(path)
+                    logging.debug(f"Removed {path}")
+            except PermissionError as e:
+                logging.debug(f"Failed to remove {path} with error: {e}")
 
    # Stop instance
    if stop:
diff --git a/tools/patch.py b/tools/patch.py
index 36513cf7c1..338692c1ef 100644
--- a/tools/patch.py
+++ b/tools/patch.py
@@ -19,8 +19,13 @@ def patch(article_path: str, results: dict, link: str):
        content_type, sw_category, content_title = article_path_parts
        article_path = PurePath(article_path, "_index.md")
    elif "install-guides" in article_path_parts:
-        content_type, content_title = article_path_parts
-        content_title = content_title.strip(".md")
+        # In case the install guide is in a subdirectory
+        if len(article_path_parts) > 3:
+            content_type, subdirectory, content_title, *others = article_path_parts
+        else:
+            content_type, content_title, *others = article_path_parts
+        # Remove ".md" from the content title if it exists
+        content_title = content_title[:-3] if content_title.endswith(".md") else content_title
        sw_category = content_type
    else:
        raise SystemExit("Unknown content path, pass learning paths or install guides only")
@@ -32,7 +37,6 @@ def patch(article_path: str, results: dict, link: str):
    for image, i in zip(test_images, range(len(test_images))):
        if content_title not in data["sw_categories"][sw_category]:
            raise SystemExit(f"{content_title} does not exist in {stats_file}. Add it to update the stats report.")
-
        data["sw_categories"][sw_category][content_title]["tests_and_status"][i][image] = results_values[results[image]]
 
    if link: