-
-
Notifications
You must be signed in to change notification settings - Fork 771
288 lines (258 loc) · 12.8 KB
/
test-core.yml
File metadata and controls
288 lines (258 loc) · 12.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
# Core test workflow: installs the package with uv, runs unit/integration/framework
# tests, and uploads coverage. Several debug steps intentionally use
# continue-on-error so missing/invalid API keys don't fail the whole job.
name: Core Tests

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  test-core:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    strategy:
      matrix:
        # Quoted so YAML keeps the version as a string (e.g. 3.10 would otherwise parse as float 3.1)
        python-version: ["3.11"]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install UV
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          cd src/praisonai
          uv pip install --system ."[ui,gradio,api,agentops,google,openai,anthropic,cohere,chat,code,realtime,call,crewai,autogen]"
          uv pip install --system duckduckgo_search
          uv pip install --system pytest pytest-asyncio pytest-cov pytest-timeout
          # Install knowledge dependencies from praisonai-agents
          uv pip install --system "praisonaiagents[knowledge]"

      - name: Set environment variables
        run: |
          echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY || 'sk-test-key-for-github-actions-testing-only-not-real' }}" >> $GITHUB_ENV
          echo "OPENAI_API_BASE=${{ secrets.OPENAI_API_BASE || 'https://api.openai.com/v1' }}" >> $GITHUB_ENV
          echo "OPENAI_MODEL_NAME=${{ secrets.OPENAI_MODEL_NAME || 'gpt-5-nano' }}" >> $GITHUB_ENV
          echo "LOGLEVEL=DEBUG" >> $GITHUB_ENV
          echo "PYTHONPATH=${{ github.workspace }}/src/praisonai-agents:$PYTHONPATH" >> $GITHUB_ENV
          # Also export to current shell session for immediate availability
          export OPENAI_API_KEY="${{ secrets.OPENAI_API_KEY || 'sk-test-key-for-github-actions-testing-only-not-real' }}"
          export OPENAI_API_BASE="${{ secrets.OPENAI_API_BASE || 'https://api.openai.com/v1' }}"
          export OPENAI_MODEL_NAME="${{ secrets.OPENAI_MODEL_NAME || 'gpt-5-nano' }}"
          export LOGLEVEL=DEBUG
          # Verify immediate availability
          echo "🔧 Immediate verification in same step:"
          echo " OPENAI_API_KEY length in current session: ${#OPENAI_API_KEY}"
          echo " OPENAI_API_KEY starts with sk-: $(echo "$OPENAI_API_KEY" | grep -q '^sk-' && echo 'YES' || echo 'NO')"

      - name: Debug API Key Status
        run: |
          echo "🔍 Checking API key availability..."
          if [ -n "${{ secrets.OPENAI_API_KEY }}" ]; then
            echo "✅ GitHub secret OPENAI_API_KEY is available"
            echo "🔑 API key starts with: $(echo "$OPENAI_API_KEY" | cut -c1-7)..."
          else
            echo "⚠️ GitHub secret OPENAI_API_KEY is NOT set - using fallback"
            echo "🔑 Using fallback key: sk-test-key..."
          fi
          echo "🌐 API Base: $OPENAI_API_BASE"
          echo "🤖 Model: $OPENAI_MODEL_NAME"
          echo "🐛 Log Level: $LOGLEVEL"
          echo "📊 Environment Check:"
          echo " - OPENAI_API_KEY length: ${#OPENAI_API_KEY}"
          echo " - OPENAI_API_BASE: $OPENAI_API_BASE"
          echo " - OPENAI_MODEL_NAME: $OPENAI_MODEL_NAME"
          echo " - LOGLEVEL: $LOGLEVEL"
          echo "🔍 Is API key actually set?"
          echo " - API key starts with sk-: $(echo "$OPENAI_API_KEY" | grep -q '^sk-' && echo 'YES' || echo 'NO')"
          echo " - API key is not test key: $([ "$OPENAI_API_KEY" != 'sk-test-key-for-github-actions-testing-only-not-real' ] && echo 'YES' || echo 'NO')"

      - name: Debug Environment Variables Raw
        run: |
          echo "🔧 Raw environment variable check:"
          echo "OPENAI_API_KEY set: $(if [ -n "$OPENAI_API_KEY" ]; then echo 'YES'; else echo 'NO'; fi)"
          echo "OPENAI_API_KEY length: ${#OPENAI_API_KEY}"
          echo "OPENAI_API_KEY first 10 chars: ${OPENAI_API_KEY:0:10}"
          echo "OPENAI_API_KEY last 5 chars: ${OPENAI_API_KEY: -5}"
          printenv | grep OPENAI || echo "No OPENAI env vars found"

      - name: Debug Python Environment Variables
        run: |
          python -c "
          import os
          print('🐍 Python Environment Variable Check:')
          api_key = os.environ.get('OPENAI_API_KEY', 'NOT_SET')
          if api_key != 'NOT_SET':
              print(f' ✅ OPENAI_API_KEY: {api_key[:7]}... (length: {len(api_key)})')
          else:
              print(' ❌ OPENAI_API_KEY: NOT_SET')
          print(f' 🌐 OPENAI_API_BASE: {os.environ.get(\"OPENAI_API_BASE\", \"NOT_SET\")}')
          print(f' 🤖 OPENAI_MODEL_NAME: {os.environ.get(\"OPENAI_MODEL_NAME\", \"NOT_SET\")}')
          print(f' 📋 All OPENAI env vars:')
          for key, value in os.environ.items():
              if key.startswith('OPENAI'):
                  print(f' {key}: {value[:10] if len(value) > 10 else value}...')
          "

      - name: Validate API Key
        run: |
          echo "🔑 Testing API key validity with minimal OpenAI call..."
          python -c "
          import os
          try:
              from openai import OpenAI
              client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
              response = client.models.list()
              print('✅ API Key is VALID - OpenAI responded successfully')
              print(f'📊 Available models: {len(list(response.data))} models found')
          except Exception as e:
              print(f'❌ API Key is INVALID - Error: {e}')
              print('🔍 This explains why all API-dependent tests are failing')
              print('💡 The GitHub secret OPENAI_API_KEY needs to be updated with a valid key')
          "
        continue-on-error: true

      - name: Debug PraisonAI API Key Usage
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY || 'sk-test-key-for-github-actions-testing-only-not-real' }}
          OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE || 'https://api.openai.com/v1' }}
          OPENAI_MODEL_NAME: ${{ secrets.OPENAI_MODEL_NAME || 'gpt-5-nano' }}
          LOGLEVEL: DEBUG
        run: |
          echo "🔍 Testing PraisonAI API key usage directly..."
          cd src/praisonai
          python -c "
          import os
          import sys
          sys.path.insert(0, '.')
          # Attempt to import SecretStr, otherwise use a dummy class
          try:
              from pydantic.types import SecretStr
          except ImportError:
              class SecretStr:  # Dummy class if pydantic is not available in this minimal context
                  def __init__(self, value): self._value = value
                  def get_secret_value(self): return self._value
          def get_key_display_value(key_value):
              if isinstance(key_value, SecretStr):
                  return key_value.get_secret_value()[:10] if key_value.get_secret_value() else 'EMPTY_SECRET'
              elif isinstance(key_value, str):
                  return key_value[:10] if key_value != 'nokey' and key_value != 'NOT_SET' else key_value
              return 'INVALID_TYPE'
          print('🔧 Direct PraisonAI API Key Check:')
          env_api_key = os.environ.get(\"OPENAI_API_KEY\", \"NOT_SET\")
          print(f'Environment OPENAI_API_KEY: {get_key_display_value(env_api_key)}...')
          from praisonai import PraisonAI
          praisonai = PraisonAI()
          print(f'PraisonAI config_list: {praisonai.config_list}')
          api_key_from_config = praisonai.config_list[0].get('api_key', 'NOT_SET')
          print(f'API key from PraisonAI config: {get_key_display_value(api_key_from_config)}...')
          from praisonai.inc.models import PraisonAIModel
          print('\\n🧪 Testing PraisonAIModel with explicit API key (CrewAI method):')
          model_with_explicit_key = PraisonAIModel(
              model='openai/gpt-5-nano',
              base_url=praisonai.config_list[0].get('base_url'),
              api_key=api_key_from_config  # This will be a string from praisonai.config_list
          )
          print(f' Model: {model_with_explicit_key.model}')
          print(f' Model name: {model_with_explicit_key.model_name}')
          print(f' API key var: {model_with_explicit_key.api_key_var}')
          # model_with_explicit_key.api_key is now a string, or 'nokey'
          print(f' API key (explicitly passed to PraisonAIModel): {get_key_display_value(model_with_explicit_key.api_key)}...')
          print(f' Base URL: {model_with_explicit_key.base_url}')
          try:
              llm_instance = model_with_explicit_key.get_model()
              print(f' ✅ LLM instance created successfully: {type(llm_instance).__name__}')
              # langchain_openai.ChatOpenAI stores the key in openai_api_key as SecretStr
              llm_api_key_attr = getattr(llm_instance, 'openai_api_key', 'NOT_FOUND')
              if llm_api_key_attr != 'NOT_FOUND':
                  print(f' LLM instance API key: {get_key_display_value(llm_api_key_attr)}...')
              else:
                  print(f' LLM instance API key attribute not found.')
          except Exception as e:
              print(f' ❌ Failed to create LLM instance: {e}')
              import traceback
              traceback.print_exc()
          "
        continue-on-error: true

      - name: Run Unit Tests
        run: |
          cd src/praisonai && python -m pytest tests/unit/ -v --tb=short --disable-warnings --cov=praisonai --cov-report=term-missing --cov-report=xml --cov-branch || echo "Some tests failed - see output above"
        continue-on-error: true

      - name: Run Integration Tests
        run: |
          cd src/praisonai && python -m pytest tests/integration/ -v --tb=short --disable-warnings

      - name: Debug Directory Structure
        run: |
          echo "🔍 Debugging directory structure for CrewAI tests..."
          cd src/praisonai
          echo "Current working directory: $(pwd)"
          echo "📁 Contents of current directory:"
          ls -la
          echo ""
          echo "📁 Contents of tests directory:"
          ls -la tests/ || echo "❌ tests/ directory not found"
          echo ""
          echo "📁 Contents of tests/integration:"
          ls -la tests/integration/ || echo "❌ tests/integration/ directory not found"
          echo ""
          echo "📁 Looking for crewai directory:"
          find . -name "crewai" -type d 2>/dev/null || echo "❌ No crewai directories found"
          echo ""
          echo "📁 Full directory tree of tests:"
          tree tests/ || find tests/ -type d 2>/dev/null || echo "❌ Cannot explore tests directory"

      - name: Test AutoGen Framework
        run: |
          echo "🤖 Testing AutoGen Framework Integration..."
          cd src/praisonai && python tests/test_runner.py --pattern autogen --verbose

      - name: Test CrewAI Framework
        run: |
          echo "⛵ Testing CrewAI Framework Integration..."
          cd src/praisonai
          echo "🔍 Trying test runner first..."
          python tests/test_runner.py --pattern crewai --verbose || {
            echo "❌ Test runner failed, trying direct pytest..."
            echo "📁 Current directory: $(pwd)"
            echo "📁 Looking for CrewAI tests..."
            find . -name "*crewai*" -type f 2>/dev/null
            echo "🧪 Trying direct pytest on integration/crewai..."
            python -m pytest tests/integration/crewai/ -v --tb=short --disable-warnings || {
              echo "❌ Direct path failed, trying relative path..."
              python -m pytest integration/crewai/ -v --tb=short --disable-warnings || {
                echo "❌ All CrewAI test attempts failed"
                exit 1
              }
            }
          }

      - name: Run Legacy Tests with API Key
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY || 'sk-test-key-for-github-actions-testing-only-not-real' }}
          OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE || 'https://api.openai.com/v1' }}
          OPENAI_MODEL_NAME: ${{ secrets.OPENAI_MODEL_NAME || 'gpt-5-nano' }}
          LOGLEVEL: DEBUG
        run: |
          echo "🧪 Running legacy tests with real API key..."
          echo "🔧 Final environment check before pytest:"
          echo " OPENAI_API_KEY set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')"
          echo " OPENAI_API_KEY length: ${#OPENAI_API_KEY}"
          echo " OPENAI_API_KEY starts with sk-: $(echo "$OPENAI_API_KEY" | grep -q '^sk-' && echo 'YES' || echo 'NO')"
          export OPENAI_API_KEY="$OPENAI_API_KEY"
          export OPENAI_API_BASE="$OPENAI_API_BASE"
          export OPENAI_MODEL_NAME="$OPENAI_MODEL_NAME"
          cd src/praisonai && python -m pytest tests/test.py -v --tb=short --disable-warnings -m "not real" --timeout=60

      - name: Upload coverage reports to Codecov
        if: matrix.python-version == '3.11'
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          # `files` replaces the deprecated `file` input in codecov-action v4+
          files: src/praisonai/coverage.xml
          flags: core-tests
          name: core-tests-coverage
          fail_ci_if_error: false
          verbose: true

      - name: Upload Coverage Reports (Artifacts)
        if: matrix.python-version == '3.11'
        uses: actions/upload-artifact@v4
        with:
          name: coverage-reports
          path: |
            src/praisonai/.coverage
            src/praisonai/htmlcov/
            src/praisonai/coverage.xml
          retention-days: 7