diff --git a/config.toml b/config.toml
index ee086f99cc..a262d66618 100644
--- a/config.toml
+++ b/config.toml
@@ -27,6 +27,12 @@ cloudFrontDistributionID = "E2NEF61QWPFRIH"
 [markup.goldmark]
   [markup.goldmark.renderer]
     unsafe = true
+  [markup.goldmark.extensions]
+    [markup.goldmark.extensions.passthrough]
+      enable = true
+      [markup.goldmark.extensions.passthrough.delimiters]
+        block = [['\[', '\]'], ['$$', '$$']]
+        inline = [['\(', '\)']]

 [frontmatter]
 lastmod = ["lastmod", ":git", "date", "publishDate"]
@@ -83,3 +89,5 @@ title = 'Arm Learning Paths'
 description = 'Tutorials with code examples, created by the Arm ecosystem to develop better code faster across all platforms: Servers, phones, laptops, embedded devices, and microcontrollers.'
 social_image = '/img/social-image.png'
 twitter_handle = '@ArmSoftwareDev'
+
+math = true
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md
new file mode 100644
index 0000000000..b149fb3eb9
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_index.md
@@ -0,0 +1,71 @@
+---
+title: Getting Started with CMSIS-DSP Using Python
+
+minutes_to_complete: 30
+
+who_is_this_for: Developers writing DSP/AI software
+
+learning_objectives:
+    - Understand how to use the CMSIS-DSP Python package
+    - Understand how the Python implementation maps to the C implementation
+    - Develop a complex application using CMSIS-DSP
+
+prerequisites:
+    - Some familiarity with DSP programming
+    - Some familiarity with Python programming
+    - Knowledge of C
+    - Some familiarity with CMSIS-DSP
+    - Python installed on your system
+
+author: Christophe Favergeon
+
+### Tags
+skilllevels: Advanced
+subjects: Libraries
+armips:
+    - Cortex-M
+    - Cortex-A
+tools_software_languages:
+    - VS Code
+    - CMSIS-DSP
+    - Python
+    - C
+    - Jupyter Notebook
+operatingsystems:
+    - Linux
+    - Windows
+    - macOS
+
+further_reading:
+    - resource:
+        title: Biquad filters with CMSIS-DSP Python package
+        link: https://developer.arm.com/documentation/102463/latest/
+        type: documentation
+    - resource:
+        title: CMSIS-DSP library
+        link: https://github.com/ARM-software/CMSIS-DSP
+        type: Open-source project
+    - resource:
+        title: CMSIS-DSP Python package
+        link: https://pypi.org/project/cmsisdsp/
+        type: Open-source project
+    - resource:
+        title: CMSIS-DSP Python package examples and tests
+        link: https://github.com/ARM-software/CMSIS-DSP/tree/main/PythonWrapper/examples
+        type: Open-source project
+    - resource:
+        title: CMSIS-Stream
+        link: https://github.com/ARM-software/CMSIS-Stream
+        type: Open-source project
+
+### FIXED, DO NOT MODIFY
+# ================================================================================
+weight: 1                       # _index.md always has weight of 1 to order correctly
+layout: "learningpathall"       # All files under learning paths have this same wrapper
+learning_path_main_page: "yes"  # This should be surfaced when looking for related content. Only set for _index.md of learning path content.
+---
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_next-steps.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_next-steps.md
new file mode 100644
index 0000000000..c3db0de5a2
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/_next-steps.md
@@ -0,0 +1,8 @@
+---
+# ================================================================================
+# FIXED, DO NOT MODIFY THIS FILE
+# ================================================================================
+weight: 21                  # Set to always be larger than the content in this path to be at the end of the navigation.
+title: "Next Steps"         # Always the same, html page title.
+layout: "learningpathall"   # All files under learning paths have this same wrapper for Hugo processing.
+---
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png
new file mode 100644
index 0000000000..7d6630778b
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/audiowidget.png differ
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png
new file mode 100644
index 0000000000..4bb3e0fd70
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/cleaned.png differ
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png
new file mode 100644
index 0000000000..e36e52d1a1
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/hanning.png differ
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md
new file mode 100644
index 0000000000..7efc9b88da
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-1.md
@@ -0,0 +1,24 @@
+---
+title: What is the CMSIS-DSP Python package?
+weight: 2
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## What is CMSIS-DSP?
+
+CMSIS-DSP is a general-purpose compute library with a focus on DSP. It was initially developed for Cortex-M processors and has recently been upgraded to also support Cortex-A.
+
+On each processor, CMSIS-DSP is optimized for the architecture: DSP extensions on M4 and M7; Helium on M55 and M85; Neon on A55, etc.
+
+## What is the CMSIS-DSP Python package?
+
+The CMSIS-DSP Python package is a Python API for CMSIS-DSP. Its goal is to make it easier to develop a C solution using CMSIS-DSP by decreasing the gap between a design environment like Python and the final C implementation.
+
+For this reason, the Python API is as close as possible to the C one.
+
+Fixed-point arithmetic is rarely provided by Python packages, which generally focus on floating-point operations. The CMSIS-DSP Python package provides the same fixed-point arithmetic functions as the C version: Q31, Q15, and Q7. The package also provides floating-point functions and, like the C API, will support half-precision floats in the future.
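+
+As a quick illustration, here is a minimal sketch of the fixed-point workflow (the conversion helpers `fix.toQ15` and `fix.Q15toF32`, and the `arm_negate_q15` function, are all used later in this learning path):
+
+```python
+import numpy as np
+import cmsisdsp as dsp
+import cmsisdsp.fixedpoint as fix
+
+# Convert a float signal to Q15 (int16), process it with a CMSIS-DSP
+# function, and convert the result back to float.
+x = np.array([0.25, -0.5, 0.75], dtype=np.float32)
+x_q15 = fix.toQ15(x)
+y_q15 = dsp.arm_negate_q15(x_q15)   # mirrors the C function arm_negate_q15
+y = fix.Q15toF32(y_q15)
+```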
+
+Finally, the CMSIS-DSP Python package is compatible with NumPy and can be used with all other scientific and AI Python packages, such as SciPy and PyTorch.
+
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md
new file mode 100644
index 0000000000..a06c50a31a
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-2.md
@@ -0,0 +1,76 @@
+---
+title: Install the Python packages
+weight: 3
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Installing the Python packages
+The application you will develop with CMSIS-DSP requires a few additional Python packages besides CMSIS-DSP. These need to be installed before you start writing code.
+
+Activate the Python environment you have chosen.
+
+The first package to install is CMSIS-DSP:
+
+```bash
+pip install cmsisdsp
+```
+It will also install `NumPy`, which is a dependency of the CMSIS-DSP Python package.
+
+You'll be working with a Jupyter notebook, so the `jupyter` package must also be installed:
+
+```bash
+pip install jupyter
+```
+
+In the Jupyter notebook, you'll be using widgets to play sound, so you'll need to install some additional Jupyter widgets:
+
+```bash
+pip install ipywidgets
+```
+
+Finally, you'll need packages to read sound files and display plots:
+
+```bash
+pip install soundfile
+pip install matplotlib
+```
+
+You can now launch the Jupyter notebook:
+
+```bash
+jupyter notebook
+```
+Create a new Jupyter notebook by clicking `New` and selecting `Python 3 (ipykernel)`.
+
+The new notebook will be named `Untitled`. Rename it to something more descriptive.
+
+You can now import all the required packages.
+
+Type the following Python code into your notebook and run the cell (Shift+Enter).
+All the Python code in this learning path is intended to be executed in the same Jupyter notebook.
+
+```python
+import cmsisdsp as dsp
+import cmsisdsp.fixedpoint as fix
+# Datatype constants (used later for the Neon CFFT temporary-buffer size)
+import cmsisdsp.datatype as dt
+import numpy as np
+from numpy.lib.stride_tricks import sliding_window_view
+
+# Package for plotting
+import matplotlib.pyplot as plt
+
+# Package to display audio widgets in the notebook and upload sound files
+import ipywidgets
+from IPython.display import display,Audio
+
+# To convert a sound file to a NumPy array
+import io
+import soundfile as sf
+
+# To load test patterns from the Arm Virtual Hardware Echo Canceller demo
+from urllib.request import urlopen
+```
+
+You're now ready to move on to the next steps.
\ No newline at end of file
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md
new file mode 100644
index 0000000000..1f5abe6aa3
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-3.md
@@ -0,0 +1,87 @@
+---
+title: Load an audio file
+weight: 4
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Load an audio file
+
+Load an audio file from one of the Arm demo repositories on GitHub.
+
+```python
+test_pattern_url="https://github.com/ARM-software/VHT-SystemModeling/blob/main/EchoCanceller/sounds/yesno.wav?raw=true"
+f = urlopen(test_pattern_url)
+filedata = f.read()
+```
+
+You can now play and listen to the audio:
+```python
+audio=Audio(data=filedata,autoplay=False)
+audio
+```
+
+An audio widget will appear in your Jupyter notebook.
+It will look like this:
+
+![audio widget alt-text#center](audiowidget.png "Figure 1. Audio widget")
+
+You can use it to listen to the audio.
+
+You'll hear a sequence of the words "yes" and "no", with some noise between them.
+The goal of this learning path is to design an algorithm to remove the noise.
+
+Next, convert the audio into a NumPy array so that it can be processed using CMSIS-DSP:
+
+```python
+data, samplerate = sf.read(io.BytesIO(filedata))
+if len(data.shape)>1:
+    data=data[:,0]
+data = data.astype(np.float32)
+data=data/np.max(np.abs(data))
+dataQ15 = fix.toQ15(data)
+```
+
+The code above does the following:
+- Converts the audio into a NumPy array
+- Keeps only one channel if the audio is stereo
+- Normalizes the audio to ensure no value exceeds 1
+- Converts the audio to Q15 fixed-point representation to enable the use of CMSIS-DSP fixed-point functions
+
+Now, plot the audio waveform:
+
+```python
+plt.plot(data)
+plt.show()
+```
+
+You'll get the following output:
+
+![audio signal alt-text#center](signal.png "Figure 2. Audio signal")
+
+In the plot, you can see a sequence of words. Between the words, the signal is not zero: there is some noise.
+
+In a real application, you don't wait for the entire signal to be received. The signal is continuous, and the samples are processed as they are received. Processing can be either sample-based or block-based. For this learning path, the processing will be block-based.
+
+Before you can move to the next step, this signal must be split into blocks. The processing will occur on small blocks of samples of a given duration.
+
+```python
+winDuration=30e-3/6
+winOverlap=15e-3/6
+
+winLength=int(np.floor(samplerate*winDuration))
+winOverlap=int(np.floor(samplerate*winOverlap))
+slices=sliding_window_view(data,winLength)[::winOverlap,:]
+slices_q15=sliding_window_view(dataQ15,winLength)[::winOverlap,:]
+```
+
+Refer to the [NumPy documentation](https://numpy.org/doc/stable/reference/generated/numpy.lib.stride_tricks.sliding_window_view.html) for details about `sliding_window_view`. It's not the most efficient function, but it is sufficient for this tutorial.
+
+The signal is split into overlapping blocks: each block reuses half of the samples from the previous block, as defined by the `winOverlap` variable.
+
+You are now ready to move on to the next step: you have an audio signal that has been split into overlapping blocks, and processing will occur on those blocks.
+
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md
new file mode 100644
index 0000000000..d85a8dc10a
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-4.md
@@ -0,0 +1,125 @@
+---
+title: Write a simple VAD
+weight: 5
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Write a simple voice activity detector
+
+To remove the noise between speech segments, you need to detect when voice is present.
+
+Voice activity detection can be complex, but for this learning path, you'll implement a very simple and naive approach based on energy. The idea is that if the environment isn't too noisy, speech should have more energy than the noise.
+
+The detection will rely on a comparison with a threshold that must be manually tuned.
+
+You'll first implement a version of the voice activity detection (VAD) with NumPy, which will serve as a reference.
+
+Then you'll implement the same version using CMSIS-DSP with the Q15 fixed-point format.
+
+### NumPy VAD
+
+First, you need to compute the energy of the signal within a block of samples. You'll ignore any constant component and focus only on the varying part of the signal:
+
+```python
+# Energy of the window
+def signal_energy(window):
+    w = window - np.mean(window)
+    return(10*np.log10(np.sum(w * w)))
+```
+Then, compare the energy to a threshold to determine whether the block of audio is speech or noise:
+
+```python
+def signal_vad(window):
+    if signal_energy(window)>-11:
+        return(1)
+    else:
+        return(0)
+```
+
+The threshold is hard-coded. It's not a very clean solution, but it's sufficient for a tutorial.
+
+When using such a detector, you'll quickly find that it is not sufficient. You'll need another pass to clean up the detection signal.
+
+```python
+def clean_vad(v):
+    v = np.hstack([[0],v,[0]])
+    # Remove isolated peak
+    vmin=[np.min(l) for l in sliding_window_view(v,3)]
+    vmin = np.hstack([[0,0],vmin,[0]])
+    # Remove isolated hole
+    vmax=[np.max(l) for l in sliding_window_view(vmin,4)]
+    return(vmax)
+```
+
+Now you can apply this algorithm to the audio signal and plot the VAD detection over it to see if it's working:
+
+```python
+_,ax=plt.subplots(1,1)
+cleaned=clean_vad([signal_vad(w) for w in slices])
+vad = np.array([[w]*(winLength-winOverlap) for w in cleaned]).flatten()
+ax.plot(data)
+ax.plot(vad)
+```
+![vad alt-text#center](vad.png "Figure 3. VAD")
+
+The reference implementation works. You can now implement the same version using CMSIS-DSP.
+
+### CMSIS-DSP Q15 VAD
+
+First, you need to compute the signal energy from audio in Q15 format using CMSIS-DSP.
+
+If you look at the CMSIS-DSP documentation, you'll see that the power and log functions don't produce results in Q15 format. Tracking the fixed-point format throughout all lines of an algorithm can be challenging.
+
+For this tutorial, instead of trying to determine the exact fixed-point format of the output and applying the necessary shift to adjust the output's fixed-point format, we'll simply tune the threshold of the detection function.
+
+```python
+def signal_energy_q15(window):
+    mean=dsp.arm_mean_q15(window)
+    # Subtracting the mean won't cause saturation.
+    # So we use the CMSIS-DSP negate function on an array containing a single sample.
+    neg_mean=dsp.arm_negate_q15([mean])[0]
+    window=dsp.arm_offset_q15(window,neg_mean)
+    energy=dsp.arm_power_q15(window)
+    # Energy is not in Q15 format (refer to the CMSIS-DSP documentation).
+    energy=dsp.ssat(energy>>20,16)
+    dB=dsp.arm_vlog_q15([energy])
+    # The output of the `vlog` is not in Q15.
+    # The multiplication by 10 is missing compared to the NumPy
+    # reference implementation.
+    # The result of this function is not equivalent to the float implementation due to different
+    # formats used in intermediate computations.
+    # As a consequence, a different threshold must be used to compensate for these differences.
+    return(dB[0])
+```
+
+The comparison function is very similar to the NumPy reference, but the threshold is different:
+
+```python
+def signal_vad_q15(window):
+    # The threshold is not directly comparable to the float implementation
+    # due to the different intermediate formats used in the fixed-point implementation.
+    if signal_energy_q15(window)>fix.toQ15(-0.38):
+        return(1)
+    else:
+        return(0)
+```
+
+Note that in C code, you would use the output value of `fix.toQ15(-0.38)`.
+
+`fix.toQ15` is a utility of the Python package to convert float to fixed-point. It is not available in the CMSIS-DSP C implementation.
+CMSIS-DSP C has functions like `arm_float_to_q15`, which work on arrays and are meant to be used at runtime. If you need a precomputed constant, you can use a utility function like `fix.toQ15` and use the resulting value in the code.
+
+The clean VAD function is the same for both the NumPy and Q15 versions.
+
+Now you can check whether the Q15 version is working by plotting the signal and the output of the Q15 VAD algorithm:
+
+```python
+_,ax=plt.subplots(1,1)
+cleaned=clean_vad([signal_vad_q15(w) for w in slices_q15])
+vad_q15 = np.array([[w]*winOverlap for w in cleaned]).flatten()
+ax.plot(data)
+ax.plot(vad_q15)
+```
\ No newline at end of file
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md
new file mode 100644
index 0000000000..e36593d3e0
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-5.md
@@ -0,0 +1,390 @@
+---
+title: Write a noise suppression algorithm
+weight: 6
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Write a noise suppression algorithm
+
+### Overlapping windows
+
+The blocks of audio samples you created in the previous steps will be multiplied by a Hanning window function, which looks like this:
+
+```python
+window=dsp.arm_hanning_f32(winLength)
+plt.plot(window)
+plt.show()
+```
+
+![hanning alt-text#center](hanning.png "Figure 4. Hanning Window")
+
+The slices we created are overlapping. By applying a Hanning window function and summing the slices, you can reconstruct the original signal.
+
+Indeed, summing two Hanning windows shifted by half the width of the sample block gives:
+
+![summed hanning alt-text#center](sumhanning.png "Figure 5. Summed Hanning Window")
+
+As a result, if you multiply the overlapping blocks of samples by Hanning windows and sum the result, you can reconstruct the original signal:
+
+```python
+offsets = range(0, len(data),winOverlap)
+offsets=offsets[0:len(slices)]
+res=np.zeros(len(data))
+i=0
+for n in offsets:
+    res[n:n+winLength] += slices[i]*window
+    i=i+1
+plt.plot(res)
+plt.show()
+```
+
+You can now listen to the recombined signal:
+```python
+audio2=Audio(data=res,rate=samplerate,autoplay=False)
+audio2
+```
+
+This means you can process each slice independently and then recombine them at the end to produce the output signal.
+
+### Principle of the noise reduction
+
+The algorithm works in the spectral domain, so an FFT will be used.
+When there is no speech (as detected with the VAD), the noise level in each frequency band is estimated.
+
+When speech is detected, the noise estimate is used.
+
+Noise filtering in each band uses a simplified Wiener filter.
+
+A gain is applied to the signal, defined as follows:
+
+$$H(f) = \frac{S(f)}{S(f) + N(f)}$$
+
+- \(S(f)\) is the speech spectrum.
+- \(N(f)\) is the noise spectrum.
+
+$$H(f) = \frac{1}{1 + \frac{N(f)}{S(f)}}$$
+
+For this tutorial, we assume a high SNR. The VAD relies on this assumption: the signal energy is sufficient to detect speech.
+With a high signal-to-noise ratio, \(\frac{N(f)}{S(f)}\) is small, so the first-order approximation \(\frac{1}{1+x} \approx 1 - x\) gives:
+
+$$H(f) \approx 1 - \frac{N(f)}{S(f)}$$
+
+You don't have access to \(S(f)\), only to the measured \(S(f) + N(f)\), which is used instead under the assumption that the noise is small, making the approximation acceptable:
+
+$$H(f) \approx 1 - \frac{N(f)}{S(f) + N(f)}$$
+
+with \(S(f) + N(f) = E(f)\)
+
+- \(E(f)\) is the observed energy in a frequency band.
+
+It can be rewritten as:
+
+$$H(f) \approx \frac{E(f) - N(f)}{E(f)}$$
+
+- \(N(f)\) is estimated when there is no speech.
+
+In the Python code below, you'll see this formula implemented as:
+
+```python
+scaling = (energy - self._noise)/energy
+```
+
+(Don't evaluate this Python code in your Jupyter notebook; it will be run later as part of the full implementation.)
+
+### NoiseSuppression and NoiseSuppressionReference classes
+
+The entire algorithm will be packaged as a Python class.
+The class functions are explained below using Python code that should not be evaluated in the Jupyter notebook.
+
+You should only evaluate the full class definition in the Jupyter notebook, not the code snippets used for explanation.
+
+#### NoiseSuppression constructor
+
+`NoiseSuppression` is a shared class used by both the float reference implementation and the Q15 version.
+
+```python
+class NoiseSuppression():
+    def __init__(self,slices):
+        self._windowLength=len(slices[0])
+        self._fftLen,self._fftShift=fft_length(self._windowLength)
+
+        self._padding_left=(self._fftLen - self._windowLength)//2
+        self._padding_right=self._fftLen- self._windowLength-self._padding_left
+
+        self._signal=[]
+        self._slices=slices
+        self._window=None
+```
+
+The constructor for `NoiseSuppression`:
+- Uses the audio slices as input
+- Computes the FFT length that can be used for each slice
+- Computes the padding needed for the FFT
+
+The FFT length must be a power of 2. The slice length is not necessarily a power of 2. The constructor computes the closest usable power of 2. The audio slices are padded with zeros on both sides to match the required FFT length.
+
+#### NoiseSuppressionReference constructor
+
+```python
+class NoiseSuppressionReference(NoiseSuppression):
+    def __init__(self,slices):
+        NoiseSuppression.__init__(self,slices)
+
+        # Compute the vad signal
+        self._vad=clean_vad([signal_vad(w) for w in slices])
+        self._noise=np.zeros(self._fftLen)
+        # The Hann window
+        self._window=dsp.arm_hanning_f32(self._windowLength)
+```
+
+The constructor for `NoiseSuppressionReference`:
+- Uses the audio slices as input
+- Calls the constructor for `NoiseSuppression`
+- Computes the VAD signal for the full audio signal
+- Computes the Hanning window
+
+#### subnoise
+```python
+def subnoise(self,v):
+    # This is a Wiener estimate.
+    energy = v * np.conj(v) + 1e-6
+
+    scaling = (energy - self._noise)/energy
+    scaling[scaling<0] = 0
+
+    return(v * scaling)
+```
+
+This function computes the approximate Wiener gain.
+If the gain is negative, it is set to 0.
+A small value is added to the energy to avoid division by zero.
+This function is applied to all frequency bands of the FFT. The `v` argument is a vector.
+
+#### remove_noise
+```python
+def remove_noise(self,w):
+    # We pad the signal with zeros. This assumes the padding is divisible by 2.
+    # A more robust implementation would also handle the odd-length case.
+    # The FFT length is greater than the window length and must be a power of 2.
+    sig=self.window_and_pad(w)
+
+    # FFT
+    fft=np.fft.fft(sig)
+    # Noise suppression
+    fft = self.subnoise(fft)
+    # IFFT
+    res=np.fft.ifft(fft)
+    # We assume the result should be real, so we ignore the imaginary part.
+    res=np.real(res)
+    # We remove the padding.
+    res=self.remove_padding(res)
+    return(res)
+```
+
+The function computes the FFT (with padding) and reduces noise in the frequency bands using the approximate Wiener gain.
+
+#### estimate_noise
+```python
+def estimate_noise(self,w):
+    # Compute the padded signal.
+    sig=self.window_and_pad(w)
+    fft=np.fft.fft(sig)
+
+    # Estimate the noise energy.
+    self._noise = np.abs(fft)*np.abs(fft)
+
+    # Remove the noise.
+    fft = self.subnoise(fft)
+
+    # Perform the IFFT, assuming the result is real, so we ignore the imaginary part.
+    res=np.fft.ifft(fft)
+    res=np.real(res)
+    res=self.remove_padding(res)
+    return(res)
+```
+
+This function is very similar to the previous one.
+It's used when no speech is detected.
+It updates the noise estimate before reducing the noise.
+
+#### nr
+
+```python
+def nr(self):
+    for (w,v) in zip(self._slices,self._vad):
+        result=None
+        if v==1:
+            # If voice is detected, we only remove the noise.
+            result=self.remove_noise(w)
+        else:
+            # If no voice is detected, we update the noise estimate.
+            result=self.estimate_noise(w)
+        self._signal.append(result)
+```
+
+The main function: it removes noise from each slice.
+If a slice does not contain speech, the noise estimate is updated before reducing noise in each frequency band.
+
+#### overlap_and_add
+
+The filtered slices are recombined:
+
+```python
+def overlap_and_add(self):
+    offsets = range(0, len(self._signal)*winOverlap,winOverlap)
+    offsets=offsets[0:len(self._signal)]
+    res=np.zeros(len(data))
+    i=0
+    for n in offsets:
+        res[n:n+winLength]+=self._signal[i]
+        i=i+1
+    return(res)
+```
+
+### The final code for the Python class
+
+You can evaluate this code in your Jupyter notebook.
+
+```python
+def fft_length(length):
+    result=2
+    fft_shift=1
+    while result < length:
+        result = 2*result
+        fft_shift = fft_shift + 1
+    return(result,fft_shift)
+
+class NoiseSuppression():
+    def __init__(self,slices):
+        self._windowLength=len(slices[0])
+        self._fftLen,self._fftShift=fft_length(self._windowLength)
+
+        self._padding_left=(self._fftLen - self._windowLength)//2
+        self._padding_right=self._fftLen- self._windowLength-self._padding_left
+
+        self._signal=[]
+        self._slices=slices
+        self._window=None
+
+    def window_and_pad(self,w):
+        if w.dtype==np.int32:
+            w=dsp.arm_mult_q31(w,self._window)
+        elif w.dtype==np.int16:
+            w=dsp.arm_mult_q15(w,self._window)
+        else:
+            w = w*self._window
+        sig=np.hstack([np.zeros(self._padding_left,dtype=w.dtype),w,np.zeros(self._padding_right,dtype=w.dtype)])
+        return(sig)
+
+    def remove_padding(self,w):
+        return(w[self._padding_left:self._padding_left+self._windowLength])
+
+class NoiseSuppressionReference(NoiseSuppression):
+    def __init__(self,slices):
+        # In a better version, the FFT length could be computed from the signal length by taking the
+        # smallest power of two greater than the signal length.
+        NoiseSuppression.__init__(self,slices)
+
+        # Compute the vad signal
+        self._vad=clean_vad([signal_vad(w) for w in slices])
+        self._noise=np.zeros(self._fftLen)
+        # The Hann window
+        self._window=dsp.arm_hanning_f32(self._windowLength)
+
+    # Subtract the noise
+    def subnoise(self,v):
+        # This is a Wiener estimate
+        energy = v * np.conj(v) + 1e-6
+
+        scaling = (energy - self._noise)/energy
+        scaling[scaling<0] = 0
+
+        return(v * scaling)
+
+    def remove_noise(self,w):
+        # We pad the signal with zeros. This assumes the padding is divisible by 2.
+        # A better implementation would also handle the odd case.
+        # The padding is required because the FFT length is greater than the length of
+        # the window.
+        sig=self.window_and_pad(w)
+
+        # FFT
+        fft=np.fft.fft(sig)
+        # Noise suppression
+        fft = self.subnoise(fft)
+        # IFFT
+        res=np.fft.ifft(fft)
+        # We assume the result should be real, so we just ignore the imaginary part
+        res=np.real(res)
+        # We remove the padding
+        res=self.remove_padding(res)
+        return(res)
+
+    def estimate_noise(self,w):
+        # Compute the padded signal
+        sig=self.window_and_pad(w)
+        fft=np.fft.fft(sig)
+
+        # Estimate the noise energy
+        self._noise = np.abs(fft)*np.abs(fft)
+
+        # Remove the noise
+        fft = self.subnoise(fft)
+
+        # IFFT; we assume the result is real, so we ignore the imaginary part
+        res=np.fft.ifft(fft)
+        res=np.real(res)
+        res=self.remove_padding(res)
+        return(res)
+
+    # Process all the windows using the VAD detection
+    def nr(self):
+        for (w,v) in zip(self._slices,self._vad):
+            result=None
+            if v==1:
+                # If voice is detected, we only remove the noise
+                result=self.remove_noise(w)
+            else:
+                # If no voice is detected, we update the noise estimate
+                result=self.estimate_noise(w)
+            self._signal.append(result)
+
+    # Overlap and add to rebuild the signal
+    def overlap_and_add(self):
+        offsets = range(0, len(self._signal)*winOverlap,winOverlap)
+        offsets=offsets[0:len(self._signal)]
+        res=np.zeros(len(data))
+        i=0
+        for n in offsets:
+            res[n:n+winLength]+=self._signal[i]
+            i=i+1
+        return(res)
+```
+You can now test this algorithm on the original signal:
+
+```python
+n=NoiseSuppressionReference(slices)
+n.nr()
+cleaned=n.overlap_and_add()
+plt.plot(cleaned)
+plt.show()
+```
+
+![cleaned alt-text#center](cleaned.png "Figure 6. Cleaned signal")
+
+You can now listen to the result:
+
+```python
+audioRef=Audio(data=cleaned,rate=samplerate,autoplay=False)
+audioRef
+```
\ No newline at end of file
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md
new file mode 100644
index 0000000000..10e9f97fcc
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-6.md
@@ -0,0 +1,414 @@
+---
+title: Write the CMSIS-DSP Q15 implementation
+weight: 7
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Write the CMSIS-DSP Q15 implementation
+
+### Slicing
+
+The CMSIS-DSP implementation is very similar to the reference implementation you just tested.
+
+With the following code, you'll check that recombining the windowed block samples works correctly.
+Since the Q15 representation is less accurate than float and can saturate, it's a good idea to verify the recombination step.
+
+The Hanning window is converted to Q15 format. Then, the slices are multiplied by the Q15 Hanning window and summed.
+The final result is converted to float.
+
+```python
+offsets = range(0, len(data),winOverlap)
+offsets=offsets[0:len(slices_q15)]
+res=np.zeros(len(data))
+window_q15=fix.toQ15(window)
+i=0
+for n in offsets:
+    w = dsp.arm_mult_q15(slices_q15[i],window_q15)
+    res[n:n+winLength] = dsp.arm_add_q15(res[n:n+winLength],w)
+    i=i+1
+res_q15=fix.Q15toF32(res)
+plt.plot(res_q15)
+plt.show()
+```
+You can now listen to the audio to check the result:
+```python
+audio4=Audio(data=res_q15,rate=samplerate,autoplay=False)
+audio4
+```
+### Utilities
+
+CMSIS-DSP does not have a complex data type. Complex numbers are represented as a float array with alternating real and imaginary parts: real, imaginary, real, imaginary, and so on.
+
+You'll need functions to convert to and from NumPy complex arrays.
+
+```python
+def imToReal1D(a):
+    ar=np.zeros(np.array(a.shape) * 2)
+    ar[0::2]=a.real
+    ar[1::2]=a.imag
+    return(ar)
+
+def realToIm1D(ar):
+    return(ar[0::2] + 1j * ar[1::2])
+```
+
+## The final Q15 implementation
+
+Try the final implementation first, and then we'll analyze the differences from the reference implementation.
+
+```python
+class NoiseSuppressionQ15(NoiseSuppression):
+    def __init__(self,slices):
+        NoiseSuppression.__init__(self,slices)
+
+        # VAD signal.
+        self._vad= clean_vad(np.array([signal_vad_q15(w) for w in slices]))
+        self._noise=np.zeros(self._fftLen,dtype=np.int32)
+        # Q15 version of the Hanning window.
+        self._window=fix.toQ15(dsp.arm_hanning_f32(self._windowLength))
+        # CFFT Q15 instance.
+        self._cfftQ15=dsp.arm_cfft_instance_q15()
+        status=dsp.arm_cfft_init_q15(self._cfftQ15,self._fftLen)
+
+        self._noise_status = -1
+        self._noise_max = 0x7FFF
+
+    # Subtract the noise.
+    def subnoise(self,v,status,the_max):
+        # We cannot compute the energy in Q15, because many values would otherwise be 0.
+        # The noise signal is too small for its energy to be representable in Q15.
+        # So we convert to Q31 and perform noise subtraction in Q31.
+        vq31 = dsp.arm_q15_to_q31(v)
+        energy = dsp.arm_cmplx_mag_squared_q31(vq31)
+
+        # The energy for the signal and noise were computed on a rescaled signal.
+        # So, we remove the scaling from the values before computing the ratio (energy - noise) / energy.
+        # `status == 0` means the signal has been rescaled.
+        if status==0:
+            the_max_q31=dsp.arm_q15_to_q31([the_max])[0]
+            energy=dsp.arm_scale_q31(energy,the_max_q31,0)
+            energy=dsp.arm_scale_q31(energy,the_max_q31,0)
+
+        noise = self._noise
+        # `status == 0` means the noise has been rescaled.
+        if self._noise_status==0:
+            the_max_q31=dsp.arm_q15_to_q31([self._noise_max])[0]
+            noise=dsp.arm_scale_q31(noise,the_max_q31,0)
+            noise=dsp.arm_scale_q31(noise,the_max_q31,0)
+
+        temp = dsp.arm_sub_q31(energy , noise)
+        temp[temp<0]=0
+
+        scalingQ31 = np.zeros(len(temp),dtype=np.int32)
+        shift = np.zeros(len(temp),dtype=np.int32)
+
+        # The scaling factor `(energy - noise) / energy` is computed.
+        k=0
+        # We assume that `|energy - noise|<=energy`
+        # Otherwise, we set scaling to `1`
+        # If energy is `0`, we also set scaling to `1`.
+        # When `a == b`, `shiftVal` is equal to `1` because `1` (as the result of the division operator)
+        # is represented as `0x40000000` with a shift of `1` instead of `0x7FFFFFFF` for output of division
+        # We handle this case separately
+        for a,b in zip(temp,energy):
+            quotient=0x7FFFFFFF
+            shiftVal=0
+            if b!=0 and a!=b:
+                # We compute the quotient.
+                status,quotient,shiftVal = dsp.arm_divide_q31(a,b)
+                if shiftVal > 0:
+                    quotient=0x7FFFFFFF
+                    shiftVal = 0
+
+            scalingQ31[k] = quotient
+            shift[k] = shiftVal
+
+            k = k + 1
+
+        res=dsp.arm_cmplx_mult_real_q31(vq31,scalingQ31)
+        resQ15 = dsp.arm_q31_to_q15(res)
+
+        return(resQ15)
+
+    # To achieve maximum accuracy with the Q15 FFT, the signal is rescaled before computing the FFT.
+    # It is divided by its maximum value.
+    def rescale(self,w):
+        the_max,index=dsp.arm_absmax_q15(w)
+
+        quotient=0x7FFF
+        the_shift=0
+        status = -1
+        if the_max != 0:
+            status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max)
+            if status == 0:
+                w=dsp.arm_scale_q15(w,quotient,the_shift)
+        return(w,status,the_max)
+
+    # The scaling is removed after the IFFT is computed.
+    def undo_scale(self,w,the_max):
+        w=dsp.arm_scale_q15(w,the_max,0)
+        return(w)
+
+    def remove_noise(self,w):
+        w,status,the_max = self.rescale(w)
+        sig=self.window_and_pad(w)
+
+        # Convert to complex.
+        signalR=np.zeros(len(sig) * 2,dtype=np.int16)
+        signalR[0::2]=sig
+
+        if dsp.has_neon():
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp)
+        else:
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1)
+
+        resultR = self.subnoise(resultR,status,the_max)
+
+        if dsp.has_neon():
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp)
+        else:
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1)
+        res = dsp.arm_shift_q15(res,self._fftShift)
+
+        res=res[0::2]
+        res=self.remove_padding(res)
+
+        if status == 0:
+            res=self.undo_scale(res,the_max)
+        return(res)
+
+    def estimate_noise(self,w):
+        w,status,the_max = self.rescale(w)
+        self._noise_status = status
+        self._noise_max = the_max
+
+        sig=self.window_and_pad(w)
+
+        signalR=np.zeros(len(sig) * 2,dtype=np.int16)
+        signalR[0::2]=sig
+
+        if dsp.has_neon():
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp)
+        else:
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1)
+
+        resultRQ31 = dsp.arm_q15_to_q31(resultR)
+
+        self._noise = dsp.arm_cmplx_mag_squared_q31(resultRQ31)
+
+        resultR = np.zeros(len(resultR),dtype=np.int16)
+
+        if dsp.has_neon():
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp)
+        else:
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1)
+        res = dsp.arm_shift_q15(res,self._fftShift)
+
+        res=res[0::2]
+        res=self.remove_padding(res)
+
+        if status == 0:
+            res=self.undo_scale(res,the_max)
+
+        return(res)
+
+    def do_nothing(self,w):
+        w,status,the_max = self.rescale(w)
+        sig=self.window_and_pad(w)
+
+        # Convert to complex.
+        signalR=np.zeros(len(sig) * 2,dtype=np.int16)
+        signalR[0::2]=sig
+
+        if dsp.has_neon():
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp)
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,tmp=self._tmp)
+        else:
+            resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1)
+            res = dsp.arm_cfft_q15(self._cfftQ15,resultR,1,1)
+        res = dsp.arm_shift_q15(res,self._fftShift)
+
+        res=res[0::2]
+
+        res=self.remove_padding(res)
+
+        if status == 0:
+            res=self.undo_scale(res,the_max)
+
+        return(res)
+
+    def nr(self,nonr=False):
+        if dsp.has_neon():
+            tmp_nb = dsp.arm_cfft_tmp_buffer_size(dt.Q15,self._fftLen,1)
+            self._tmp = np.zeros(tmp_nb,dtype=np.int16)
+        for (w,v) in zip(self._slices,self._vad):
+            result=None
+            if nonr:
+                result = self.do_nothing(w)
+            else:
+                if v==1:
+                    result=self.remove_noise(w)
+                else:
+                    result=self.estimate_noise(w)
+            self._signal.append(result)
+
+    def overlap_and_add(self):
+        nbSamples = len(self._signal)*winOverlap
+        offsets = range(0, nbSamples,winOverlap)
+        offsets=offsets[0:len(self._signal)]
+        res=np.zeros(nbSamples,dtype=np.int16)
+        i=0
+        for n in offsets:
+            res[n:n+winLength] = dsp.arm_add_q15(res[n:n+winLength],self._signal[i])
+            i=i+1
+        return(res)
+```
+
+Verify that the Q15 algorithm is working:
+
+```python
+n=NoiseSuppressionQ15(slices_q15)
+n.nr()
+cleaned_q15=n.overlap_and_add()
+plt.plot(fix.Q15toF32(cleaned_q15))
+plt.show()
+```
+
+You can now listen to the result:
+
+```python
+audioQ15=Audio(data=fix.Q15toF32(cleaned_q15),rate=samplerate,autoplay=False)
+audioQ15
+```
+
+## Differences with the float implementation
+
+There are many differences from the original float implementation, which are explained below.
+
+### constructor
+
+The constructor is similar and uses Q15 instead of float. The Hanning window is converted to Q15, and Q15 versions of the CFFT objects are created.
+
+### subnoise
+
+The noise reduction function is more complex for several reasons:
+
+- Q15 is not accurate enough for the energy computation. Q31 is used instead. For instance:
+```python
+vq31 = dsp.arm_q15_to_q31(v)
+energy = dsp.arm_cmplx_mag_squared_q31(vq31)
+```
+
+- For maximum accuracy, the signal is rescaled before calling this function. Since energy is not a linear function, the scaling factor must be compensated when computing the Wiener gain. The argument `status` is zero when the scaling has been applied. A similar scaling factor is applied to the noise:
+```python
+if status==0:
+    the_max_q31=dsp.arm_q15_to_q31([the_max])[0]
+    energy=dsp.arm_scale_q31(energy,the_max_q31,0)
+    energy=dsp.arm_scale_q31(energy,the_max_q31,0)
+```
+
+- CMSIS-DSP fixed-point division represents 1 exactly. So in Q31, instead of using `0x7FFFFFFF`, `1` is represented as `0x40000000` with a shift of `1`. This behavior is handled in the algorithm when converting the scaling factor to an approximate Q31 value:
+```python
+status,quotient,shiftVal = dsp.arm_divide_q31(a,b)
+if shiftVal > 0:
+    quotient=0x7FFFFFFF
+    shiftVal = 0
+```
+
+- The final scaling is performed using a Q31 multiplication, and the result is converted back to Q15:
+```python
+res = dsp.arm_cmplx_mult_real_q31(vq31,scalingQ31)
+resQ15 = dsp.arm_q31_to_q15(res)
+```
+
+### rescaling
+
+To achieve maximum accuracy in Q15, the signal (and noise) is rescaled before computing the energy.
+This rescaling function did not exist in the float implementation.
+The signal is divided by its maximum value to bring it to full scale:
+
+```python
+def rescale(self,w):
+    the_max,index=dsp.arm_absmax_q15(w)
+
+    quotient=0x7FFF
+    the_shift=0
+    status = -1
+    if the_max != 0:
+        status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max)
+        if status == 0:
+            w=dsp.arm_scale_q15(w,quotient,the_shift)
+    return(w,status,the_max)
+```
+
+The scaling must be reversed after the IFFT to allow recombining the slices and reconstructing the signal:
+
+```python
+def undo_scale(self,w,the_max):
+    w=dsp.arm_scale_q15(w,the_max,0)
+    return(w)
+```
+
+### noise suppression
+
+The algorithm closely follows the float implementation.
+However, there is a small difference because CMSIS-DSP can be built for Cortex-A and Cortex-M. On Cortex-A, there are small differences in the FFT API, as it uses a different implementation.
+
+If the Python package has been built with Neon acceleration, it will use the new API that requires an additional temporary buffer.
+
+If this temporary buffer is not provided, the Python package will allocate it automatically. While you can use the same API, this is less efficient.
+
+It is better to detect whether the package has been compiled with Neon acceleration, allocate a temporary buffer, and use it in the FFT calls. This approach is closer to how the C API must be used.
+
+```python
+if dsp.has_neon():
+    resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,tmp=self._tmp)
+else:
+    resultR = dsp.arm_cfft_q15(self._cfftQ15,signalR,0,1)
+```
+
+In the Neon version, the FFT's bit-reversal flag is no longer available: bit reversal cannot be disabled.
+
+A scaling factor must be applied to the IFFT output:
+
+```python
+res = dsp.arm_shift_q15(res,self._fftShift)
+```
+
+This scaling is unrelated to the signal and noise scaling used for improved accuracy.
+
+The output of the Q15 IFFT is not in Q15 format and must be converted. This is typical of fixed-point FFTs, and the same applies to Q31 FFTs.
+
+Finally, the accuracy-related scaling factor is removed at the end of the function:
+
+```python
+if status == 0:
+    res=self.undo_scale(res,the_max)
+```
+
+### noise estimation
+
+The noise estimation function performs both noise estimation and noise suppression.
+
+Noise energy is computed in Q31 for higher accuracy.
+The FFT functions detect whether the package was built with Neon support.
+
+### do_nothing
+
+`do_nothing` is a debug function. You can disable noise reduction and test only slicing, overlap-add, and the FFT/IFFT in between.
+
+This function applies scaling and performs the FFT/IFFT.
+
+It's a good way to check for saturation issues (which are common with fixed-point arithmetic) and to ensure proper scaling compensation.
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md
new file mode 100644
index 0000000000..94f3f21b47
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-7.md
@@ -0,0 +1,118 @@
+---
+title: Convert the CMSIS-DSP Python code to C
+weight: 8
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Convert the CMSIS-DSP Python code to C
+
+Once the Python code is working, writing the C code should be straightforward, since the CMSIS-DSP Python wrapper's API closely mirrors the C API.
+
+### Rescaling
+
+For example, let's look at rescaling.
+
+#### Python version
+
+```python
+def rescale(self,w):
+    the_max,index=dsp.arm_absmax_q15(w)
+
+    quotient=0x7FFF
+    the_shift=0
+    status = -1
+    if the_max != 0:
+        status,quotient,the_shift = dsp.arm_divide_q15(0x7FFF,the_max)
+        if status == 0:
+            w=dsp.arm_scale_q15(w,quotient,the_shift)
+    return(w,status,the_max)
+```
+
+#### C version
+
+```C
+#include "dsp/basic_math_functions.h"
+#include "dsp/statistics_functions.h"
+
+arm_status rescale(q15_t *w, uint32_t nb, q15_t *the_max)
+{
+    uint32_t index;
+    q15_t quotient = 0x7FFF;
+    /* The default status means the signal is zero and can't be rescaled */
+    arm_status status = ARM_MATH_SINGULAR;
+    int16_t the_shift = 0;
+    *the_max = 0;
+
+    arm_absmax_q15(w, nb, the_max, &index);
+    if (*the_max != 0)
+    {
+        status = arm_divide_q15(0x7FFF, *the_max, &quotient, &the_shift);
+        if (status == ARM_MATH_SUCCESS)
+        {
+            arm_scale_q15(w, quotient, (int8_t)the_shift, w, nb);
+        }
+    }
+
+    return (status);
+}
+```
+
+### Signal energy
+
+#### Python version
+
+```python
+def signal_energy_q15(window):
+    mean=dsp.arm_mean_q15(window)
+    # If we subtract the mean, we won't get saturation.
+    # So we use the CMSIS-DSP negate function on an array containing a single sample.
+    neg_mean=dsp.arm_negate_q15([mean])[0]
+    window=dsp.arm_offset_q15(window,neg_mean)
+    energy=dsp.arm_power_q15(window)
+    # Energy is not in Q15 (refer to the CMSIS-DSP documentation).
+    energy=dsp.ssat(energy>>20,16)
+    dB=dsp.arm_vlog_q15([energy])
+    # The output of the `vlog` is not in Q15.
+    # The multiplication by `10` is missing compared to the NumPy
+    # reference implementation.
+    # The result of this function is not equivalent to the float implementation due to the different
+    # formats used in the intermediate computations.
+    # As a consequence, a different threshold will have to be used
+    # to compensate for these differences.
+    return(dB[0])
+```
+
+#### C version
+
+```C
+#include "dsp/basic_math_functions.h"
+#include "dsp/fast_math_functions.h"
+#include "dsp/statistics_functions.h"
+
+int16_t signal_energy_q15(q15_t *window, uint32_t nb)
+{
+    q15_t mean, neg_mean;
+    arm_mean_q15(window, nb, &mean);
+
+    arm_negate_q15(&mean, &neg_mean, 1);
+
+    arm_offset_q15(window, neg_mean, window, nb);
+
+    q63_t energy_q63;
+    q15_t energy;
+    arm_power_q15(window, nb, &energy_q63);
+
+    energy = (q15_t)__SSAT((q31_t)(energy_q63 >> 20), 16);
+
+    // The result is a 16-bit fixed-point value, but its exact format
+    // has not been identified, to keep this tutorial simpler.
+    // We just know it is not Q15.
+    int16_t dB;
+
+    arm_vlog_q15(&energy, &dB, 1);
+
+    return (dB);
+}
+```
+
+A DSP function written in Python using CMSIS-DSP can be easily converted into a similar C function.
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md
new file mode 100644
index 0000000000..5fc02c677a
--- /dev/null
+++ b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/how-to-8.md
@@ -0,0 +1,27 @@
+---
+title: Study more examples
+weight: 9
+
+### FIXED, DO NOT MODIFY
+layout: learningpathall
+---
+
+## Study more examples
+
+The [CMSIS-DSP Python example folder](https://github.com/ARM-software/CMSIS-DSP/tree/main/PythonWrapper/examples) contains many tests, examples, and some Jupyter notebooks.
+
+You can study these examples to gain a better understanding of how to use the Python package.
+
+The [CMSIS-DSP Python package](https://pypi.org/project/cmsisdsp/) documentation describes the differences between the Python API and the C API.
+
+## Remaining issues
+
+The CMSIS-DSP Python package helps you design a DSP function that works on a block of samples and translate it from Python to C.
+In a real application, however, you don't receive blocks of samples but a continuous stream.
+
+The stream of samples must be split into blocks before the DSP function can be used. The processed blocks may need to be recombined to reconstruct a signal.
+
+Part of the difficulty in this learning path comes from splitting and recombining the signal. Translating this part of the Python code to C adds further complexity.
+
+[CMSIS-Stream](https://github.com/ARM-software/CMSIS-Stream) can help with this. It is a platform-independent, low-overhead technology designed to simplify the use of block-processing functions with sample streams.
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png
new file mode 100644
index 0000000000..83640fdbe5
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/signal.png differ
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png
new file mode 100644
index 0000000000..e9a8ca4da5
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/sumhanning.png differ
diff --git a/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png
new file mode 100644
index 0000000000..7b2d0195b7
Binary files /dev/null and b/content/learning-paths/embedded-and-microcontrollers/cmsisdsp-dev-with-python/vad.png differ
diff --git a/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html b/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
index 3d2d0da93a..ca5cc446bf 100644
--- a/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
+++ b/themes/arm-design-system-hugo-theme/layouts/_default/baseof.html
@@ -9,6 +9,10 @@ {{partial "head/analytics.html" .}}
     {{partial "head/conditional-redirect.html" .}}
     {{partial "head/hotjar-surveys.html" .}}
+
+    {{ if .Param "math" }}
+        {{ partialCached "math.html" . }}
+    {{ end }}

     {{partial "header/nav-masthead.html" .}}
diff --git a/themes/arm-design-system-hugo-theme/layouts/partials/math.html b/themes/arm-design-system-hugo-theme/layouts/partials/math.html
new file mode 100644
index 0000000000..1f74076371
--- /dev/null
+++ b/themes/arm-design-system-hugo-theme/layouts/partials/math.html
@@ -0,0 +1,12 @@
+
+
\ No newline at end of file