#!/usr/bin/env bash

# Script to start Cloudflare Workers for testing
-# This script starts all workers (D1 Sync, API, Plugin, Files) in the background
+# This script starts all workers (API, Plugin, Files) in the background

set -e

+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ROOT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
+
echo "Starting Cloudflare Workers for testing..."

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

+# Use the installed Supabase CLI in CI, fall back to bunx locally.
+if command -v supabase >/dev/null 2>&1; then
+  SUPABASE_CLI="supabase"
+else
+  SUPABASE_CLI="bunx supabase"
+fi
+
+# Extract a single variable from `supabase status -o env`, preserving any '=' in values (JWT padding).
+get_supabase_status_var() {
+  local key_regex="$1"
+  # Output looks like: KEY="value" or KEY=value
+  printf '%s\n' "${SUPA_ENV}" \
+    | grep -E "^(${key_regex})=" \
+    | head -n 1 \
+    | sed -E 's/^[^=]+=//' \
+    | sed -E 's/^"//; s/"$//'
+}
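+# Hypothetical example: `supabase status -o env` prints lines such as
+#   SERVICE_ROLE_KEY="eyJhbGciOiJIUzI1NiJ9.sample-claims.sig=="
+# and `get_supabase_status_var 'SERVICE_ROLE_KEY|SECRET_KEY'` would return the value
+# with the surrounding quotes stripped and the trailing '==' padding intact.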
+
+# Build a runtime env file with local Supabase keys so we don't commit secrets.
+BASE_ENV_FILE="${ROOT_DIR}/cloudflare_workers/.env.local"
+RUNTIME_ENV_FILE="$(mktemp "${TMPDIR:-/tmp}/capgo-cloudflare-env.XXXXXX")"
+chmod 600 "${RUNTIME_ENV_FILE}"
+if [ -f "${BASE_ENV_FILE}" ]; then
+  cp "${BASE_ENV_FILE}" "${RUNTIME_ENV_FILE}"
+else
+  echo -e "${YELLOW}Warning: ${BASE_ENV_FILE} not found - starting with empty base env${NC}"
+fi
+
+SUPA_ENV="$(${SUPABASE_CLI} status -o env 2>/dev/null || true)"
+SUPABASE_URL_FROM_STATUS="$(get_supabase_status_var 'API_URL')"
+# Supabase CLI has historically emitted either SERVICE_ROLE_KEY/ANON_KEY or SECRET_KEY/PUBLISHABLE_KEY.
+SUPABASE_SERVICE_ROLE_KEY_FROM_STATUS="$(get_supabase_status_var 'SERVICE_ROLE_KEY|SECRET_KEY')"
+SUPABASE_ANON_KEY_FROM_STATUS="$(get_supabase_status_var 'ANON_KEY|PUBLISHABLE_KEY')"
+
+# Allow overrides via environment, otherwise use supabase status output.
+SUPABASE_URL="${SUPABASE_URL:-${SUPABASE_URL_FROM_STATUS}}"
+SUPABASE_SERVICE_ROLE_KEY="${SUPABASE_SERVICE_ROLE_KEY:-${SUPABASE_SERVICE_ROLE_KEY_FROM_STATUS}}"
+SUPABASE_ANON_KEY="${SUPABASE_ANON_KEY:-${SUPABASE_ANON_KEY_FROM_STATUS}}"
+
+if [ -z "${SUPABASE_SERVICE_ROLE_KEY}" ] || [ -z "${SUPABASE_ANON_KEY}" ] || [ -z "${SUPABASE_URL}" ]; then
+  echo -e "${YELLOW}Missing Supabase keys for Cloudflare Workers.${NC}"
+  echo "Ensure Supabase is running, or set SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY and SUPABASE_ANON_KEY in your environment."
+  exit 1
+fi
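+# Example override with hypothetical values, for when the CLI status output is unavailable:
+#   export SUPABASE_URL=http://127.0.0.1:54321
+#   export SUPABASE_ANON_KEY=<anon key>
+#   export SUPABASE_SERVICE_ROLE_KEY=<service role key>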
+
+# Cloudflare local testing defaults.
+CLOUDFLARE_FUNCTION_URL="${CLOUDFLARE_FUNCTION_URL:-http://127.0.0.1:8787}"
+STRIPE_WEBHOOK_SECRET="${STRIPE_WEBHOOK_SECRET:-testsecret}"
+
+# In CI/linux, `host.docker.internal` is unreliable. Prefer localhost (mapped ports).
+S3_ENDPOINT_TO_USE="${S3_ENDPOINT:-127.0.0.1:9000}"
+
+cat >> "${RUNTIME_ENV_FILE}" <<EOF
+SUPABASE_URL=${SUPABASE_URL}
+SUPABASE_SERVICE_ROLE_KEY=${SUPABASE_SERVICE_ROLE_KEY}
+SUPABASE_ANON_KEY=${SUPABASE_ANON_KEY}
+CLOUDFLARE_FUNCTION_URL=${CLOUDFLARE_FUNCTION_URL}
+STRIPE_WEBHOOK_SECRET=${STRIPE_WEBHOOK_SECRET}
+S3_ENDPOINT=${S3_ENDPOINT_TO_USE}
+EOF
+
# Kill any existing wrangler processes
echo -e "${YELLOW}Cleaning up existing wrangler processes...${NC}"
pkill -f "wrangler dev" || true
sleep 2

-# Wait a bit for the sync worker to start
-sleep 3
-
# Start API worker on port 8787
echo -e "${GREEN}Starting API worker on port 8787...${NC}"
-(cd cloudflare_workers/api && bunx wrangler dev -c wrangler.jsonc --port 8787 --env-file=../.env.local --env=local --persist-to ../../.wrangler-shared) &
+(cd "${ROOT_DIR}/cloudflare_workers/api" && bunx wrangler dev -c wrangler.jsonc --port 8787 --env-file="${RUNTIME_ENV_FILE}" --env=local --persist-to "${ROOT_DIR}/.wrangler-shared") &
API_PID=$!

# Wait a bit for the first worker to start
sleep 3

# Start Plugin worker on port 8788
echo -e "${GREEN}Starting Plugin worker on port 8788...${NC}"
-(cd cloudflare_workers/plugin && bunx wrangler dev -c wrangler.jsonc --port 8788 --env-file=../.env.local --env=local --persist-to ../../.wrangler-shared) &
+(cd "${ROOT_DIR}/cloudflare_workers/plugin" && bunx wrangler dev -c wrangler.jsonc --port 8788 --env-file="${RUNTIME_ENV_FILE}" --env=local --persist-to "${ROOT_DIR}/.wrangler-shared") &
PLUGIN_PID=$!

# Wait a bit for the second worker to start
sleep 3

# Start Files worker on port 8789
echo -e "${GREEN}Starting Files worker on port 8789...${NC}"
-(cd cloudflare_workers/files && bunx wrangler dev -c wrangler.jsonc --port 8789 --env-file=../.env.local --env=local --persist-to ../../.wrangler-shared) &
+(cd "${ROOT_DIR}/cloudflare_workers/files" && bunx wrangler dev -c wrangler.jsonc --port 8789 --env-file="${RUNTIME_ENV_FILE}" --env=local --persist-to "${ROOT_DIR}/.wrangler-shared") &
FILES_PID=$!

echo -e "${GREEN}All workers started!${NC}"
-echo "D1 Sync Worker PID: $SYNC_PID (http://127.0.0.1:8790)"
echo "API Worker PID: $API_PID (http://127.0.0.1:8787)"
echo "Plugin Worker PID: $PLUGIN_PID (http://127.0.0.1:8788)"
echo "Files Worker PID: $FILES_PID (http://127.0.0.1:8789)"
echo ""

-# Queue initial data to D1 via PGMQ (production-like approach)
-echo -e "${GREEN}Queueing initial data for D1 sync...${NC}"
-psql postgresql://postgres:postgres@127.0.0.1:54322/postgres -f scripts/trigger-initial-d1-sync.sql > /dev/null 2>&1
-if [ $? -eq 0 ]; then
-  echo -e "${GREEN}✓ Initial data queued to PGMQ${NC}"
-
-  # Trigger sync worker to process the queue
-  echo -e "${GREEN}Triggering D1 sync worker...${NC}"
-  curl -s -X POST http://127.0.0.1:8790/sync -H "x-webhook-signature: testsecret" > /dev/null 2>&1
-  if [ $? -eq 0 ]; then
-    echo -e "${GREEN}✓ D1 sync triggered successfully${NC}"
-    sleep 2
-    echo -e "${GREEN}✓ D1 database is now ready with initial data${NC}"
-  else
-    echo -e "${YELLOW}⚠ Warning: Failed to trigger D1 sync${NC}"
-  fi
-else
-  echo -e "${YELLOW}⚠ Warning: Failed to queue initial data${NC}"
-fi
-
echo ""
echo "Press Ctrl+C to stop all workers"

# Function to cleanup on exit
cleanup() {
  echo -e "\n${YELLOW}Stopping workers...${NC}"
-  kill $SYNC_PID $API_PID $PLUGIN_PID $FILES_PID 2>/dev/null || true
+  kill $API_PID $PLUGIN_PID $FILES_PID 2>/dev/null || true
  pkill -f "wrangler dev" || true
+  rm -f "${RUNTIME_ENV_FILE}" 2>/dev/null || true
  echo -e "${GREEN}All workers stopped${NC}"
}
