-
Notifications
You must be signed in to change notification settings - Fork 0
43 lines (36 loc) · 1.29 KB
/
run_tests.yml
File metadata and controls
43 lines (36 loc) · 1.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
---
# CI workflow: build a synthetic test video and run the Phase 1 speech
# analysis script on every push / pull request targeting 'main'.
name: ModalX AI Auto-Test

# Trigger: run this every time we push to (or open a PR against) 'main'
on:
  push:
    branches: ["main"]
  pull_request:
    branches: ["main"]

jobs:
  test-phase-1:
    runs-on: ubuntu-latest  # GitHub-hosted Linux runner
    steps:
      # 1. Check out the repository
      - uses: actions/checkout@v3

      # 2. Install FFmpeg (required for audio handling)
      - name: Install FFmpeg
        run: sudo apt-get update && sudo apt-get install -y ffmpeg

      # 3. Set up Python (version quoted so 3.9 isn't read as a float)
      - name: Set up Python 3.9
        uses: actions/setup-python@v4
        with:
          python-version: "3.9"

      # 4. Install the project's Python libraries
      - name: Install Dependencies
        run: |
          python -m pip install --upgrade pip
          pip install openai-whisper librosa numpy "moviepy<1.0.3" scipy

      # 5. Create a dummy video for testing (so no large media files
      #    need to be committed to the repository)
      - name: Create Dummy Video
        run: |
          # We use ffmpeg to generate a 2-second blank video with silence
          ffmpeg -f lavfi -i testsrc=duration=2:size=1280x720:rate=30 -f lavfi -i sine=frequency=1000:duration=2 -c:v libx264 -c:a aac -shortest Phase_1_Speech_Analysis/test_video.mp4

      # 6. Run the Phase 1 analysis script against the generated video
      - name: Run Phase 1 Analysis
        run: |
          cd Phase_1_Speech_Analysis
          python speech_analysis.py