
Commit 05ed3ae

Merge pull request #86 from intel/update-branch
feat: fix digital avatar demo & enhance microservices dockerfile to production standard (#265)
2 parents 3f7be50 + cc247ef commit 05ed3ae

File tree

19 files changed: +160, -110 lines changed

usecases/ai/digital-avatar/.gitignore

Lines changed: 3 additions & 1 deletion
@@ -8,5 +8,7 @@ backend/musetalk/models
 backend/musetalk/data/avatars
 backend/wav2lip/wav2lip/results
 backend/wav2lip/wav2lip/temp
+assets/*
 weights/*
-backend/liveportrait/templates
+backend/liveportrait/templates
+data/ollama

usecases/ai/digital-avatar/README.md

Lines changed: 7 additions & 5 deletions
@@ -85,8 +85,7 @@ You can offload model inference to specific device by modifying the environment
 | Workload             | Environment Variable | Supported Device        |
 |----------------------|----------------------|-------------------------|
 | LLM                  | -                    | GPU                     |
-| STT - Encoded Device | STT_ENCODED_DEVICE   | CPU,GPU,NPU             |
-| STT - Decoded Device | STT_DECODED_DEVICE   | CPU,GPU                 |
+| STT                  | STT_DEVICE           | CPU,GPU,NPU             |
 | TTS                  | TTS_DEVICE           | CPU                     |
 | Lipsync (Wav2lip)    | DEVICE               | CPU, GPU                |

@@ -95,14 +94,18 @@ Example Configuration:
 * To offload the STT encoded workload to `NPU`, you can use the following configuration.

 ```
-wav2lip:
+stt_service:
   ...
   environment:
     ...
-    DEVICE=CPU
+    STT_DEVICE=CPU
   ...
 ```

+## Limitations
+### 1. Automatic Speech Recognition Compatibility
+Automatic speech recognition functionality is not supported in Firefox. Please use Chrome for validated performance.
+
 ## FAQ
 ### 1. Update Render Group ID
 1. Ensure the [Intel GPU driver](#prerequisite) is installed.

@@ -116,4 +119,3 @@ wav2lip:
 ```
 4. The group ID is the number in the third field (e.g., `110` in the example above).
 5. Ensure the `RENDER_GROUP_ID` in the [docker-compose.yml](./docker-compose.yml) file matches the render group ID.
-

usecases/ai/digital-avatar/data/.gitkeep

Whitespace-only changes.

usecases/ai/digital-avatar/docker-compose.yml

Lines changed: 11 additions & 2 deletions
@@ -120,7 +120,7 @@ services:
     ports:
       - 8014:5996
     volumes:
-      - ./data:/usr/src/app/data:rw
+      - stt_volume:/usr/src/app/data:rw
     devices:
       - /dev:/dev:rw
       - /lib/modules:/lib/modules:rw

@@ -151,13 +151,22 @@ services:
       - /dev:/dev:rw
       - /lib/modules:/lib/modules:rw
     volumes:
-      - ../microservices/ollama:/root/.ollama
+      - ./data/ollama:/root/.ollama
+      - ./scripts/run.sh:/opt/intel/llm-app/run.sh
+      - ./scripts/healthcheck.sh:/opt/intel/llm-app/healthcheck.sh
+    healthcheck:
+      test: ["CMD", "bash", "/opt/intel/llm-app/healthcheck.sh"]
+      interval: 1m30s
+      timeout: 30s
+      retries: 5
+      start_period: 30s
     command: "./run.sh"

 networks:
   app-network:

 volumes:
+  stt_volume:
   tts_volume:
   data_volume:
   ollama:
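With the healthcheck block above in place, Docker tracks a health state for the service. A quick way to observe it, assuming a hypothetical container name of `digital-avatar-ollama-1` (the real name depends on the compose project):

```bash
# Reports "starting" during start_period, then "healthy" or
# "unhealthy" once the configured retries are exhausted.
docker inspect --format '{{.State.Health.Status}}' digital-avatar-ollama-1
```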

usecases/ai/microservices/ollama/healthcheck.sh renamed to usecases/ai/digital-avatar/scripts/healthcheck.sh

Lines changed: 0 additions & 3 deletions
@@ -1,5 +1,4 @@
 #!/bin/bash
-
 # Ensure OLLAMA_HOST is set
 if [ -z "$OLLAMA_HOST" ]; then
     echo "Error: OLLAMA_HOST environment variable is not set."

@@ -13,13 +12,11 @@ if [ -z "$LLM_MODEL" ]; then
 fi

 PORT=$(echo "$OLLAMA_HOST" | cut -d':' -f2)
-
 # Capture the HTTP response code and response body separately
 http_code=$(curl -s -o response.txt -w "%{http_code}" -X POST "http://localhost:$PORT/api/show" -d "{\"model\": \"$LLM_MODEL\"}")
 response=$(cat response.txt)

 echo "HTTP Response Code: $http_code"
-
 if [ "$http_code" -ne 200 ]; then
     echo "Error: HTTP request failed with status code $http_code"
     rm response.txt
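The script probes Ollama's `/api/show` endpoint for the configured model. An equivalent manual check, assuming Ollama's default port 11434 and a hypothetical model name:

```bash
# 200 means the model exists and the server is responsive; the script
# treats any other status code as a failed healthcheck.
curl -s -o /tmp/response.txt -w "%{http_code}\n" \
    -X POST "http://localhost:11434/api/show" \
    -d '{"model": "llama3.1"}'
```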
usecases/ai/microservices/ollama/run.sh renamed to usecases/ai/digital-avatar/scripts/run.sh

Lines changed: 1 addition & 1 deletion

@@ -29,4 +29,4 @@ curl -# http://localhost:"$PORT"/api/generate -d "{\"model\": \"$LLM_MODEL\", \"

 # To keep the container running, use an indefinite wait
 echo "Ollama is running. Keeping container alive..."
-tail -f /dev/null
+tail -f /dev/null
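The hunk context shows run.sh priming the model through `/api/generate` before parking on `tail -f /dev/null`; the JSON body is truncated in the diff, so the following is only a hypothetical reconstruction, with the port and model name assumed:

```bash
# Hypothetical warm-up call: a request with keep_alive=-1 loads the
# model into memory and keeps it resident indefinitely.
curl -# "http://localhost:11434/api/generate" \
    -d '{"model": "llama3.1", "keep_alive": -1}'
```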
usecases/ai/microservices/ollama/Dockerfile

Lines changed: 15 additions & 11 deletions

@@ -1,27 +1,31 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-FROM intel/oneapi-basekit:2024.2.1-0-devel-ubuntu22.04
+FROM intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04

 WORKDIR /opt/intel/llm-app
-RUN apt update \
-    && apt install -y python3.11-venv jq \
-    wget \
+RUN rm -rf /etc/apt/sources.list.d/intel-graphics.list \
+    && apt update \
+    && apt install -y python3.11 \
+        python3.11-venv \
+        wget \
     && wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg \
     && echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list \
     && apt update \
-    && apt-get install -y libze1 intel-level-zero-gpu intel-opencl-icd clinfo \
+    && apt-get install -y libze1 \
+        intel-level-zero-gpu \
+        intel-opencl-icd \
+        clinfo \
     && rm -rf /var/lib/apt/lists/* \
     && python3.11 -m venv .venv
 ENV PATH="/opt/intel/llm-app/.venv/bin:$PATH"
+ENV LD_LIBRARY_PATH=".:$LD_LIBRARY_PATH"
 RUN python3 -m pip install --upgrade pip \
-    && python3 -m pip install --pre --upgrade 'ipex-llm[cpp]==2.2.0b20240917' \
-    && python3 -m pip install bigdl-core-cpp==2.6.0b20240917 \
+    && python3 -m pip install --pre --upgrade 'ipex-llm[cpp]==2.2.0b20241216' \
    && python3 -m pip install --upgrade accelerate==0.33.0 \
     && init-ollama

-COPY ./run.sh .
+HEALTHCHECK --interval=60s --timeout=5m --start-period=5s --retries=5 \
+    CMD curl --fail http://localhost:11434 || exit 1

-COPY ./healthcheck.sh .
-HEALTHCHECK --interval=60s --timeout=20m --start-period=5s --retries=20 \
-    CMD ./healthcheck.sh
+CMD ["./ollama", "serve"]

usecases/ai/microservices/speech-to-text/Dockerfile

Lines changed: 26 additions & 17 deletions
@@ -6,22 +6,25 @@ FROM openvino/ubuntu22_dev:2024.5.0
 USER root

 # Install dependencies
-RUN apt-get update && apt-get install -y ffmpeg \
-    wget \
-    gnupg2 \
-    libtbb12 \
-    python3.11 \
-    python3.11-venv
+RUN apt-get update \
+    && apt-get install -y ffmpeg \
+        wget \
+        gnupg2 \
+        libtbb12 \
+        python3.11 \
+        python3.11-venv

 # Install GPU drivers
-RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg \
-    && echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list \
-    && apt update \
-    && apt-get install -y libze1 \
-    intel-level-zero-gpu \
-    intel-opencl-icd \
-    clinfo \
-    && rm -rf /var/lib/apt/lists/*
+RUN mkdir /tmp/neo \
+    && cd /tmp/neo \
+    && wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17791.9/intel-igc-core_1.0.17791.9_amd64.deb \
+    && wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17791.9/intel-igc-opencl_1.0.17791.9_amd64.deb \
+    && wget https://github.com/intel/compute-runtime/releases/download/24.39.31294.12/intel-level-zero-gpu-dbgsym_1.6.31294.12_amd64.ddeb \
+    && wget https://github.com/intel/compute-runtime/releases/download/24.39.31294.12/intel-level-zero-gpu_1.6.31294.12_amd64.deb \
+    && wget https://github.com/intel/compute-runtime/releases/download/24.39.31294.12/intel-opencl-icd-dbgsym_24.39.31294.12_amd64.ddeb \
+    && wget https://github.com/intel/compute-runtime/releases/download/24.39.31294.12/intel-opencl-icd_24.39.31294.12_amd64.deb \
+    && wget https://github.com/intel/compute-runtime/releases/download/24.39.31294.12/libigdgmm12_22.5.2_amd64.deb \
+    && dpkg -i *.deb

 # Install NPU drivers
 RUN mkdir /tmp/npu-driver \

@@ -33,7 +36,11 @@ RUN mkdir /tmp/npu-driver \
     && dpkg -i *.deb

 RUN mkdir -p /usr/src \
-    && chown -R openvino:openvino /usr/src
+    && chown -R openvino:openvino /usr/src \
+    && mkdir -p /usr/src/app/data \
+    && chown -R openvino:openvino /usr/src/app/data \
+    && mkdir -p /usr/src/app/tmp_audio \
+    && chown -R openvino:openvino /usr/src/app/tmp_audio

 USER openvino
 WORKDIR /usr/src/app

@@ -44,8 +51,10 @@ ENV PATH="/usr/src/.venv/bin:$PATH"
 COPY --chown=openvino:openvino requirements.txt .
 RUN python3 -m pip install -r requirements.txt

-COPY --chown=openvino:openvino server.py .
+COPY --chown=openvino:openvino main.py .
 COPY --chown=openvino:openvino utils.py .

 HEALTHCHECK --interval=60s --timeout=180s --start-period=5s --retries=3 \
-    CMD wget --no-verbose --no-proxy -O /dev/null --tries=1 http://stt_service:5996/healthcheck || exit 1
+    CMD wget --no-verbose -O /dev/null --tries=1 http://stt_service:5996/healthcheck || exit 1
+
+CMD ["python3", "-m", "uvicorn", "main:app", "--host", "stt_service", "--port", "5996"]
File renamed without changes.
