-server {
-    if ($host = <DOMAIN_NAME>) {
-        return 301 https://$host$request_uri;
-    } # managed by Certbot
-
-    listen 80;
-    server_name <DOMAIN_NAME>;
+# This is a new file: src/server/nginx.conf
 
-    # This block is for handling Let's Encrypt certificate challenges
-    location /.well-known/acme-challenge/ {
-        root <WEB_ROOT_PATH>;
-    }
-
-    # For all other traffic, redirect to HTTPS
-    location / {
-        return 301 https://$host$request_uri;
-    }
+upstream backend_app {
+    # This points to your FastAPI/Uvicorn server running on localhost inside the container.
+    server 127.0.0.1:5000;
 }
 
 server {
-    listen 443 ssl http2;
-    server_name <DOMAIN_NAME>;
-
-    # SSL cert paths will be filled in automatically by Certbot later
-    ssl_certificate <SSL_CERT_PATH>; # managed by Certbot
-    ssl_certificate_key <SSL_CERT_KEY_PATH>; # managed by Certbot
+    # Nginx will listen on port 80 inside the container.
+    # An external load balancer (like on GCP) or Docker port mapping will handle public-facing traffic.
+    listen 80;
+    server_name _; # Catch all domains
 
+    # Increase client body size for file uploads
     client_max_body_size 10M;
 
-    location / {
-        proxy_pass http://<BACKEND_HOST>:<BACKEND_PORT>;
+    # Location block for your chat stream (and other streaming endpoints)
+    location ~ ^/(chat/message|api/search/unified)$ {
+        proxy_pass http://backend_app;
         proxy_set_header Host $host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
         proxy_set_header X-Forwarded-Proto $scheme;
 
-        # WebSocket Headers - CRITICAL for wss:// to work
+        proxy_read_timeout 86400s; # Prevent timeout during long LLM responses
+        proxy_send_timeout 86400s;
+
+        # Crucial for Server-Sent Events (SSE) and streaming
+        proxy_buffering off;
+        proxy_cache off;
+        proxy_request_buffering off;
+        proxy_http_version 1.1;
+        proxy_set_header Connection '';
+        chunked_transfer_encoding on;
+
+        # Headers to prevent buffering by intermediate proxies/CDNs
+        add_header Cache-Control "no-cache";
+        add_header X-Accel-Buffering "no" always;
+    }
+
+    # Location block for WebSocket connections (notifications and voice)
+    location ~ ^/(api/ws/notifications|voice) {
+        proxy_pass http://backend_app;
         proxy_http_version 1.1;
         proxy_set_header Upgrade $http_upgrade;
-        proxy_set_header Connection "upgrade";
+        proxy_set_header Connection "Upgrade";
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_read_timeout 86400s; # Keep WebSocket connections alive
+        proxy_send_timeout 86400s;
+    }
+
+    # General location block for all other API requests
+    location / {
+        proxy_pass http://backend_app;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+        proxy_connect_timeout 600s;
+        proxy_send_timeout 600s;
+        proxy_read_timeout 600s;
     }
 }
+
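For reference, the upstream this config proxies to is assumed to be a FastAPI app served by Uvicorn on 127.0.0.1:5000, with a streaming route at /chat/message and a WebSocket route at /api/ws/notifications. The sketch below is illustrative only (the handler bodies and payloads are assumptions, not code from this commit); it shows the minimal upstream shape that the streaming and WebSocket location blocks above are written for.

# Hypothetical upstream sketch (not from this repo): a FastAPI app with an
# SSE-style streaming endpoint and a WebSocket endpoint, bound to the same
# address/port as the `upstream backend_app` block above.
import asyncio

import uvicorn
from fastapi import FastAPI, WebSocket
from fastapi.responses import StreamingResponse

app = FastAPI()


@app.post("/chat/message")
async def chat_message():
    # Yield tokens as Server-Sent Events; `proxy_buffering off` and
    # `X-Accel-Buffering: no` in the nginx config keep these flowing
    # to the client instead of being held until the response ends.
    async def event_stream():
        for token in ["Hello", ", ", "world"]:  # stand-in for LLM output
            yield f"data: {token}\n\n"
            await asyncio.sleep(0.1)

    return StreamingResponse(event_stream(), media_type="text/event-stream")


@app.websocket("/api/ws/notifications")
async def notifications(ws: WebSocket):
    # The Upgrade/Connection headers in the WebSocket location block are
    # what allow this handshake to pass through the proxy.
    await ws.accept()
    await ws.send_json({"type": "connected"})
    await ws.close()


if __name__ == "__main__":
    # Match the address the nginx upstream points at.
    uvicorn.run(app, host="127.0.0.1", port=5000)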
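One way to sanity-check the streaming path, assuming the container's port 80 is published to localhost via Docker port mapping (an assumption, not something this commit configures), is to request /chat/message with a streaming HTTP client and confirm that events arrive incrementally rather than in a single buffered chunk at the end.

# Hypothetical smoke test for the SSE path through nginx (URL and port mapping
# are assumptions). Events should print one by one; if buffering were still on,
# they would all arrive at once when the response completes.
import requests

with requests.post("http://localhost/chat/message", stream=True, timeout=60) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if line:  # skip SSE record-separator blank lines
            print(line)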