@@ -186,6 +186,7 @@ prometheus:
186186 # prometheus, the central grafana used does go through nginx. So we
187187 # increase the timeout here as well.
188188 nginx.ingress.kubernetes.io/proxy-read-timeout: "120"
189+ nginx.org/proxy-read-timeout: "120"
189190 strategy:
190191 # type is set to Recreate as we have a persistent disk attached. The
191192 # default of RollingUpdate would fail by getting stuck waiting for a new
@@ -317,12 +318,14 @@ grafana:
317318 # request (via jupyterhub/grafana-dashboard's deploy script, from a github
318319 # workflow) to update dashboards with json files representing them.
319320 nginx.ingress.kubernetes.io/proxy-body-size: 64m
321+ nginx.org/client-max-body-size: 64m
320322 # Increase timeout for each query made by the grafana frontend to the
321323 # grafana backend to 2min, which is the default timeout for prometheus
322324 # queries. This also matches the timeout for the dataproxy in grafana,
323325 # set under `grafana.ini` below. These two timeouts are set together
324326 # to allow prometheus the best chance of executing queries we care about.
325327 nginx.ingress.kubernetes.io/proxy-read-timeout: "120"
328+ nginx.org/proxy-read-timeout: "120"
326329 cert-manager.io/cluster-issuer: letsencrypt-prod
327330
328331 # grafana is partially configured for GitHub authentication here, but the
0 commit comments