diff --git a/deploy/docker/fs/etc/supervisor/supervisord.conf b/deploy/docker/fs/etc/supervisor/supervisord.conf
index 21d4f98d696a..eb1bedac47a7 100644
--- a/deploy/docker/fs/etc/supervisor/supervisord.conf
+++ b/deploy/docker/fs/etc/supervisor/supervisord.conf
@@ -10,7 +10,7 @@ username=%(ENV_APPSMITH_SUPERVISOR_USER)s
 password=%(ENV_APPSMITH_SUPERVISOR_PASSWORD)s
 
 [supervisord]
-logfile=%(ENV_APPSMITH_LOG_DIR)s/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
+logfile=%(ENV_APPSMITH_LOG_DIR)s/supervisor/%(ENV_HOSTNAME)s-stdout.log ; (main log file;default $CWD/supervisord.log)
 pidfile=%(ENV_TMP)s/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
 childlogdir=%(ENV_APPSMITH_LOG_DIR)s/supervisor ; ('AUTO' child log dir, default $TEMP)
 stdout_logfile_maxbytes = 0
@@ -35,13 +35,12 @@ serverurl=unix://%(ENV_TMP)s/supervisor.sock ; use a unix:// URL for a unix soc
 files = %(ENV_SUPERVISORD_CONF_TARGET)s/*.conf
 
 [eventlistener:stdout]
+# This eventlistener sends logs to the python handler in /usr/lib/python3/dist-packages/supervisor/appsmith_supervisor_stdout.py created in this repo.
+# It forwards logs for individual processes to the stdout/stderr of the main process, which lets logging pipelines capture logs from each process.
+# By default, Supervisor would also write these messages to a logfile, so directing them to /dev/null prevents double logging.
 command = python3 -m supervisor.appsmith_supervisor_stdout
 buffer_size = 10000
 events = PROCESS_LOG
 result_handler = supervisor.appsmith_supervisor_stdout:event_handler
-stdout_logfile=%(ENV_APPSMITH_LOG_DIR)s/supervisor/access-supervisor-%(ENV_HOSTNAME)s.log
-stderr_logfile=%(ENV_APPSMITH_LOG_DIR)s/supervisor/error-supervisor-%(ENV_HOSTNAME)s.log
-stdout_logfile_maxbytes=10MB
-stderr_logfile_maxbytes=10MB
-stdout_logfile_backups=10
-stderr_logfile_backups=10
+stdout_logfile=/dev/null
+stderr_logfile=/dev/null
diff --git a/deploy/docker/fs/opt/appsmith/caddy-reconfigure.mjs b/deploy/docker/fs/opt/appsmith/caddy-reconfigure.mjs
index ee605f92cf80..9a298c485c9e 100644
--- a/deploy/docker/fs/opt/appsmith/caddy-reconfigure.mjs
+++ b/deploy/docker/fs/opt/appsmith/caddy-reconfigure.mjs
@@ -42,7 +42,6 @@ const parts = []
 
 parts.push(`
 {
-  debug
   admin 0.0.0.0:2019
   persist_config off
   acme_ca_root /etc/ssl/certs/ca-certificates.crt
diff --git a/scripts/deploy_preview.sh b/scripts/deploy_preview.sh
index 26cdd33124b6..40c796d0c07c 100755
--- a/scripts/deploy_preview.sh
+++ b/scripts/deploy_preview.sh
@@ -49,12 +49,16 @@ kubectl get pods
 
 # Optional cleanup logic
 if [[ -n "${RECREATE-}" ]]; then
-  mongosh "mongodb+srv://$DB_USERNAME:$DB_PASSWORD@$DB_URL/$DBNAME?retryWrites=true&minPoolSize=1&maxPoolSize=10&maxIdleTimeMS=900000&authSource=admin" --eval 'db.dropDatabase()'
+  echo "Wiping the DP from MongoDB and Kubernetes since the reset flag was set"
   pod_name="$(kubectl get pods -n "$NAMESPACE" -o json | jq -r '.items[0].metadata.name')"
+  # Execute db.dropDatabase() from inside the k8s cluster because there are network restrictions on the Atlas cluster.
+  # The \$ escapes the $ character so the APPSMITH_DB_URL environment variable is interpolated inside the kubectl exec command, not locally.
+  kubectl exec "$pod_name" -n "$NAMESPACE" -- bash -c "mongosh \$APPSMITH_DB_URL --eval 'db.dropDatabase()'"
   kubectl exec "$pod_name" -n "$NAMESPACE" -- bash -c "rm -rf /appsmith-stacks/*"
   kubectl delete ns "$NAMESPACE" || true
   kubectl patch pv "$NAMESPACE-appsmith" -p '{"metadata":{"finalizers":null}}' || true
   kubectl delete pv "$NAMESPACE-appsmith" --grace-period=0 --force || true
+  echo "DP wiped from MongoDB and Kubernetes"
 fi
 
 # Create namespace and image pull secret