
Commit 5e6ee80

Merge remote-tracking branch 'upstream/main 6fce200' into macos
6fce200 [CI] Install qemu (tensorflow#3201)…

Committed by [email protected] on 24.09.2025, 10:27 AM
2 parents: a3f2f42 + 6fce200; commit 5e6ee80
File tree: 15 files changed, +163 / -144 lines


.github/workflows/ci.yml

Lines changed: 2 additions & 2 deletions
@@ -259,7 +259,7 @@ jobs:
       with:
         ref: ${{ inputs.trigger-sha }}
     - name: Test
-      uses: docker://ghcr.io/tflm-bot/tflm-ci:latest
+      uses: docker://ghcr.io/tflm-bot/tflm-ci:0.6.4
       with:
         args: /bin/sh -c tensorflow/lite/micro/tools/ci_build/test_cortex_m_qemu.sh tflite-micro/

@@ -275,7 +275,7 @@ jobs:
       with:
         ref: ${{ inputs.trigger-sha }}
     - name: Check
-      uses: docker://ghcr.io/tflm-bot/tflm-ci:latest
+      uses: docker://ghcr.io/tflm-bot/tflm-ci:0.6.4
       with:
         args: /bin/sh -c "git config --global --add safe.directory /github/workspace && tensorflow/lite/micro/tools/ci_build/test_code_style.sh"
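Note: pinning the CI image to an explicit tag (0.6.4) instead of :latest keeps workflow runs reproducible against a known container. A rough sketch of running the same style check locally with that image; the bind mount to /github/workspace mirrors how GitHub's docker:// actions mount the checkout and is an assumption here, not part of this commit:

  # Pull the pinned CI image and run the code-style check from the repo root
  docker pull ghcr.io/tflm-bot/tflm-ci:0.6.4
  docker run --rm -v "$(pwd)":/github/workspace -w /github/workspace \
    ghcr.io/tflm-bot/tflm-ci:0.6.4 \
    /bin/sh -c "git config --global --add safe.directory /github/workspace && tensorflow/lite/micro/tools/ci_build/test_code_style.sh"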

ci/Dockerfile.micro

Lines changed: 75 additions & 73 deletions
@@ -1,80 +1,82 @@
-# This docker container can be used to run all the TFLM CI checks.
+# TFLM CI Docker Container
 #
-# It is only used as part of the GitHub workflows to test for code-style. But
-# the container is available and ready for use to run all the checks locally,
-# in case that is useful for debugging. See all the versions at
-# https://github.com/users/TFLM-bot/packages/container/tflm-ci/versions
+# This container includes all dependencies for TFLM CI checks and is
+# recommended for local debugging to ensure a consistent environment.
 #
-# docker pull ghcr.io/tflm-bot/tflm-ci:<version>
+# === Usage ===
 #
-# Build you own container with:
-# docker build -f ci/Dockerfile.micro -t tflm-ci .
+# --- Pull a prebuilt image ---
+# See all versions: https://github.com/users/TFLM-bot/packages/container/tflm-ci/versions
+# docker pull ghcr.io/tflm-bot/tflm-ci:<version>
+#
+# --- Build locally ---
+# (Run from the root of the TFLM repository)
+# docker build -f ci/Dockerfile.micro -t tflm-ci .
+#
+# --- Build and upload to GHCR.IO ---
+# (Run from the root of the TFLM repository)
+# export TFLM_CI_VERSION=0.6.4
+# docker build -f ci/Dockerfile.micro -t ghcr.io/tflm-bot/tflm-ci:$TFLM_CI_VERSION .
+# docker push ghcr.io/tflm-bot/tflm-ci:$TFLM_CI_VERSION
+#
+# --- Run locally for debugging ---
+# (Mounts local repo and opens a shell)
+# (Run from the root of the TFLM repository)
+# docker run -it --rm -v $(pwd):/opt/tflm -w /opt/tflm tflm-ci /bin/bash
 #
-# Use a prebuilt Python image instead of base Ubuntu to speed up the build process,
-# since it has all the build dependencies we need for Micro and downloads much faster
-# than the install process.
-
-# Using a multistage build so that the build tools required for stage 1 don't make the
-# CI container unnecessarily large.
-FROM python:3.10-bullseye AS qemu_builder
-RUN apt-get update
-
-RUN apt-get install -y ninja-build wget xz-utils
-RUN apt-get install -y libglib2.0-dev
-RUN apt-get install -y build-essential libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev
-
-COPY ci/install_qemu.sh ./
-# Installs all built files into /qemu_install rather than /usr/local default.
-RUN ./install_qemu.sh /tmp/qemu_install
-
-# This stage is the final CI container.
-FROM python:3.10-bullseye AS tflm-ci
-
-RUN apt-get update
-
-RUN apt-get install -y zip xxd sudo
-
-RUN apt install -y lsb-release wget software-properties-common gnupg
-RUN wget https://apt.llvm.org/llvm.sh
-RUN chmod +x llvm.sh
-RUN ./llvm.sh 16
-RUN ln -s /usr/bin/clang-16 /usr/bin/clang
-RUN ln -s /usr/bin/clang++-16 /usr/bin/clang++
-
-RUN apt-get install clang-format-16
-RUN ln -s /usr/bin/clang-format-16 /usr/bin/clang-format
-
-# Needed when using the Dockerfile locally.
-RUN git config --global --add safe.directory /opt/tflm
-
-# Needed when the docker container is used with GitHub actions.
-RUN git config --global --add safe.directory /github/workspace
-
-# Install yapf to check for Python formatting as part of the TFLM continuous
-# integration.
-RUN pip install yapf==0.32.0
-
-# Pillow was added first for the C array generation as a result of the following
-# PRs:
-# https://github.com/tensorflow/tflite-micro/pull/337
-# https://github.com/tensorflow/tflite-micro/pull/410
-RUN pip install Pillow
-
-# necessary bits for create_size_log scripts
-RUN pip install pandas
-RUN pip install matplotlib
-RUN pip install six
-
-# Install Renode test dependencies
-RUN pip install pyyaml requests psutil robotframework==4.0.1
-
-# Install QEMU from build container qemu_builder into tflm-ci container.
-# We're using a two stage build to keep the CI container smaller.
-WORKDIR /usr/local
-# Merge built files into /usr/local so that the path is already setup.
-COPY --from=qemu_builder /tmp/qemu_install/. .

+# Use a prebuilt Python image to speed up the build process.
+FROM python:3.10-bookworm
+
+# Install all required system dependencies
+RUN apt-get update && apt-get install -y \
+    gnupg \
+    lsb-release \
+    qemu-system-arm \
+    software-properties-common \
+    sudo \
+    wget \
+    xxd \
+    zip \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install LLVM/Clang and create symlinks
+RUN wget https://apt.llvm.org/llvm.sh \
+    && chmod +x llvm.sh \
+    && ./llvm.sh 21 \
+    && rm llvm.sh \
+    && apt-get update && apt-get install -y clang-21 clang++-21 clang-format-21 \
+    && ln -s /usr/bin/clang-21 /usr/bin/clang \
+    && ln -s /usr/bin/clang++-21 /usr/bin/clang++ \
+    && ln -s /usr/bin/clang-format-21 /usr/bin/clang-format \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install all Python dependencies
+# - yapf to check for Python formatting
+# - Pillow was added first for the C array generation
+# - pandas, matplotlib, and six for create_size_log scripts
+# - pyyaml, requests, psutil, and robotframework for Renode test
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir \
+    matplotlib \
+    pandas \
+    Pillow \
+    psutil \
+    pyyaml \
+    requests \
+    robotframework==4.0.1 \
+    six \
+    yapf==0.40.2
+
+# Copy and run install scripts, then clean them up
 WORKDIR /
 COPY ci/*.sh /install/
-RUN /install/install_bazelisk.sh
-RUN /install/install_buildifier.sh
+RUN /install/install_bazelisk.sh && \
+    /install/install_buildifier.sh && \
+    rm -rf /install
+
+# Configure git safe directories
+RUN git config --global --add safe.directory /opt/tflm && \
+    git config --global --add safe.directory /github/workspace
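The structural change here is the move from a two-stage Bullseye build (QEMU compiled via ci/install_qemu.sh) to a single Bookworm stage that takes QEMU from the distribution qemu-system-arm package. A quick sanity check of a locally built image, as a sketch only (assumes the image was tagged tflm-ci per the header comments; the exact tool versions depend on the Debian packages, and bazelisk fetches Bazel on first use, so it needs network access):

  docker run -it --rm -v "$(pwd)":/opt/tflm -w /opt/tflm tflm-ci /bin/bash
  # inside the container:
  clang --version            # resolved through the clang-21 symlink
  clang-format --version     # clang-format-21 symlink
  qemu-system-arm --version  # from the Debian qemu-system-arm package
  bazel --version            # bazelisk installed as 'bazel' by ci/install_bazelisk.sh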

ci/install_bazelisk.sh

Lines changed: 1 addition & 2 deletions
@@ -15,8 +15,7 @@
 # ==============================================================================
 
 set -e
-wget https://github.com/bazelbuild/bazelisk/releases/download/v1.16.0/bazelisk-linux-amd64
+wget https://github.com/bazelbuild/bazelisk/releases/download/v1.27.0/bazelisk-linux-amd64
 mv bazelisk-linux-amd64 bazel
 chmod +x bazel
 mv bazel /usr/local/bin
-
tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc

Lines changed: 20 additions & 14 deletions
@@ -76,8 +76,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelFrequencyStarts) {
                                        kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {0, 4, 8};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_frequency_starts[i], expected[i]);

@@ -93,8 +94,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWeightStarts) {
                                        kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {0, 8, 16};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[i], expected[i]);

@@ -110,8 +112,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWidths) {
                                        kSampleRate, kSpectrumSize));
 
   const int16_t expected[] = {8, 8, 8};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels + 1,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i <= state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.channel_widths[i], expected[i]);

@@ -129,9 +132,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckWeights) {
   const int16_t expected[] = {0, 3277, 2217, 1200, 222, 0, 0, 0,
                               0, 3376, 2468, 1591, 744, 0, 0, 0,
                               0, 4020, 3226, 2456, 1708, 983, 277, 0};
-  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
-                              state.channel_widths[state.num_channels],
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.channel_weight_starts[state.num_channels] +
+          state.channel_widths[state.num_channels],
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.weights[i], expected[i]);
   }

@@ -148,9 +152,10 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckUnweights) {
   const int16_t expected[] = {0, 819, 1879, 2896, 3874, 0, 0, 0,
                               0, 720, 1628, 2505, 3352, 0, 0, 0,
                               0, 76, 870, 1640, 2388, 3113, 3819, 0};
-  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
-                              state.channel_widths[state.num_channels],
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.channel_weight_starts[state.num_channels] +
+          state.channel_widths[state.num_channels],
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   for (size_t i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.unweights[i], expected[i]);
   }

@@ -204,8 +209,9 @@ TF_LITE_MICRO_TEST(FilterbankTest_CheckSqrt) {
   uint32_t* scaled_filterbank = FilterbankSqrt(&state, kScaleShift);
 
   const uint32_t expected[] = {247311, 508620};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(scaled_filterbank[i], expected[i]);

tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc

Lines changed: 6 additions & 4 deletions
@@ -48,8 +48,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReductionEstimate) {
   NoiseReductionApply(&state, signal);
 
   const uint32_t expected[] = {6321887, 31248341};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(state.estimate[i], expected[i]);

@@ -68,8 +69,9 @@ TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReduction) {
   NoiseReductionApply(&state, signal);
 
   const uint32_t expected[] = {241137, 478104};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);

tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc

Lines changed: 3 additions & 2 deletions
@@ -52,8 +52,9 @@ TF_LITE_MICRO_TEST(PcanGainControlTest_TestPcanGainControl) {
   PcanGainControlApply(&state, signal);
 
   const uint32_t expected[] = {3578, 1533};
-  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
-                          static_cast<int>(sizeof(expected) / sizeof(expected[0])));
+  TF_LITE_MICRO_EXPECT_EQ(
+      state.num_channels,
+      static_cast<int>(sizeof(expected) / sizeof(expected[0])));
   int i;
   for (i = 0; i < state.num_channels; ++i) {
     TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);

tensorflow/lite/micro/cortex_m_corstone_300/system_setup.cc

Lines changed: 2 additions & 2 deletions
@@ -104,8 +104,8 @@ uint32_t GetCurrentTimeTicks() {
 
 #ifdef ETHOS_U
 #if defined(ETHOSU_FAST_MEMORY_SIZE) && ETHOSU_FAST_MEMORY_SIZE > 0
-__attribute__((aligned(16), section(".bss.ethosu_scratch")))
-uint8_t ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
+__attribute__((aligned(16), section(".bss.ethosu_scratch"))) uint8_t
+    ethosu0_scratch[ETHOSU_FAST_MEMORY_SIZE];
 #else
 #define ethosu0_scratch 0
 #define ETHOSU_FAST_MEMORY_SIZE 0

tensorflow/lite/micro/docs/qemu.md

Lines changed: 0 additions & 3 deletions
@@ -13,9 +13,6 @@ Our test scripts assume that the non static `user` mode installation of QEMU is
 available in the PATH. For example, if using QEMU for ARM testing, please make
 sure `qemu-arm` is installed and available to the test scripts.
 
-You can use `ci/install_qemu.sh` to download, build and install the version of
-qemu that is used as part of the CI.
-
 # Software Emulation with QEMU
 TensorFlow Lite Micro makes use of [QEMU](https://qemu.org) to
 for testing cross compiled tests.
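With the `ci/install_qemu.sh` pointer removed from the docs, a local setup needs to get user-mode QEMU from elsewhere. A possible sketch for a Debian/Ubuntu host (the package name is an assumption based on standard Debian packaging, not something this commit specifies):

  # qemu-user provides the non-static user-mode binaries such as qemu-arm
  sudo apt-get update && sudo apt-get install -y qemu-user
  qemu-arm --version   # confirm it is on PATH for the test scripts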
