Skip to content

Commit 1af54ac

Browse files
Merge branch 'main' into renovate/actions-checkout-5.x
2 parents e2f9589 + 7eca6b0 commit 1af54ac

File tree

24 files changed

+1547
-276
lines changed

24 files changed

+1547
-276
lines changed

.github/workflows/test.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ jobs:
99

1010
steps:
1111
- uses: actions/checkout@v5
12-
- uses: actions/setup-node@v4
12+
- uses: actions/setup-node@v5
1313
with:
1414
node-version: '20'
1515
cache: 'npm'

README.md

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,10 @@ Some key features include:
88
- Custom system prompts managed by teachers
99
- Retrieval-augmented generation: teachers can bring their own source material for a course
1010

11+
<!-- This badge enables weekly refreshes of the CurreChat DeepWiki page (deepwiki/gptwrapper) -->
12+
13+
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/UniversityOfHelsinkiCS/gptwrapper)
14+
1115
## Development
1216

1317
Quickstart:
@@ -34,6 +38,7 @@ Azure is used for OpenAI LLMs.
3438
Create an Azure AI Foundry resource and create a deployment for each model you want to use (gpt-5, for example). Always set the deployment name to the actual model name. So for model gpt-5, the deployment name should be gpt-5.
3539

3640
Then populate .env with the following:
41+
3742
```
3843
AZURE_RESOURCE=<name-of-the-resource-you-created>
3944
AZURE_API_KEY=<asd>
@@ -44,6 +49,7 @@ AZURE_API_KEY=<asd>
4449
S3 is used for storing user-uploaded files and their processed versions.
4550

4651
Create an S3 bucket and populate .env with the following:
52+
4753
```
4854
S3_HOST=<host-url>
4955
S3_BUCKET=<name-of-the-bucket-you-created>
@@ -56,6 +62,7 @@ S3_SECRET_KEY=<secret-key>
5662
### Debugging in production
5763

5864
In browser console, run
65+
5966
```
6067
toggleDevtools()
6168
```
@@ -65,6 +72,7 @@ toggleDevtools()
6572
Getting `Error: Cannot find module @rollup/rollup-linux-arm64-musl` on MacOS?
6673
This is likely because you ran `npm i` locally.
6774
Try removing package-lock.json locally and running
75+
6876
```
6977
docker compose build
7078
```
@@ -76,6 +84,7 @@ If then you're getting `concurrently not found`, prepend the `npm run dev` scrip
7684
Playwright e2e tests are located in `e2e`. `playwright.config.ts` is also important.
7785

7886
Run the tests with
87+
7988
```bash
8089
npm run e2e
8190
```
@@ -85,15 +94,17 @@ To run just one test, mark it with `.only`:
8594
```ts
8695
test.only('test name', async ({ page }) => {
8796
// test code
88-
});
97+
})
8998
```
9099

91100
When writing new spec file, make sure to import the test function from the fixtures file, like this:
101+
92102
```ts
93103
import { teacherTest as test } from './fixtures'
94104
```
95105

96106
So that the global foreach function runs for your tests. For different user roles (`studentTest`, `teacherTest`, `adminTest`), import the corresponding test function:
107+
97108
```ts
98109
import { studentTest as test } from './fixtures'
99110
```

compose.yaml

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -42,26 +42,25 @@ services:
4242
- redis_data:/data
4343

4444
# Comment out if you use local ollama. Set OLLAMA_URL=host.docker.internal:11434 if using recent Docker Desktop.
45-
ollama:
46-
image: ollama/ollama
47-
ports:
48-
- 11434:11434 # expose the Ollama API to the host
49-
volumes:
50-
- ollama_data:/root/.ollama # persistent model storage
51-
entrypoint: ["/bin/bash", "-c", "\
52-
ollama serve & \
53-
sleep 5 && \
54-
ollama pull qwen2.5vl:7b && \
55-
wait"]
45+
# ollama:
46+
# image: ollama/ollama
47+
# ports:
48+
# - 11434:11434 # expose the Ollama API to the host
49+
# volumes:
50+
# - ollama_data:/root/.ollama # persistent model storage
51+
# entrypoint: ["/bin/bash", "-c", "\
52+
# ollama serve & \
53+
# sleep 5 && \
54+
# ollama pull qwen2.5vl:7b && \
55+
# wait"]
5656

5757
dalai:
58-
# # image: toska/dalai:latest
5958
build:
6059
context: ./dalai
6160
dockerfile: dev.Dockerfile
6261
environment:
63-
# - OLLAMA_URL=http://host.docker.internal:11434
64-
- OLLAMA_URL=http://ollama:11434
62+
- OLLAMA_URL=http://host.docker.internal:11434
63+
# - OLLAMA_URL=http://ollama:11434
6564
- REDIS_HOST=redis
6665
- REDIS_PORT=6379
6766
- S3_HOST=http://minio:9000
@@ -72,7 +71,7 @@ services:
7271
- dalai_data:/app
7372
container_name: gptwrapper_dalai
7473
depends_on:
75-
- redis
74+
- redis
7675

7776
minio:
7877
image: minio/minio:latest
@@ -112,7 +111,6 @@ services:
112111
volumes:
113112
pg_data:
114113
redis_data:
115-
chromadb_data:
116114
ollama_data:
117115
dalai_data:
118116
minio-data:

dalai/logger.ts

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
import { token } from 'morgan'
2+
import winston from 'winston'
3+
import LokiTransport from 'winston-loki'
4+
5+
type Transport = winston.transport
6+
7+
const transports: Transport[] = []
8+
9+
const LOKI_HOST = `https://api-toska.apps.ocp-prod-0.k8s.it.helsinki.fi/lokki`
10+
11+
const LOKI_TOKEN = process.env.LOKI_TOKEN ?? ''
12+
13+
const levels = {
14+
error: 0,
15+
warn: 1,
16+
info: 2,
17+
http: 3,
18+
verbose: 4,
19+
debug: 5,
20+
silly: 6,
21+
}
22+
23+
const prodFormat = winston.format.printf(({ level, ...rest }) =>
24+
JSON.stringify({
25+
level: levels[level],
26+
...rest,
27+
}),
28+
)
29+
30+
transports.push(new winston.transports.Console({ format: prodFormat }))
31+
32+
transports.push(
33+
new LokiTransport({
34+
host: LOKI_HOST,
35+
headers: {
36+
token: LOKI_TOKEN,
37+
},
38+
labels: {
39+
app: 'dalai',
40+
},
41+
}),
42+
)
43+
44+
const logger = winston.createLogger({ transports })
45+
46+
export default logger

0 commit comments

Comments
 (0)