diff --git a/apps/docs/content/_partials/quickstart_db_setup.mdx b/apps/docs/content/_partials/quickstart_db_setup.mdx
index 161ea8b7f5edb..b8193923efca6 100644
--- a/apps/docs/content/_partials/quickstart_db_setup.mdx
+++ b/apps/docs/content/_partials/quickstart_db_setup.mdx
@@ -20,7 +20,7 @@ curl -X POST https://api.supabase.com/v1/projects \
"organization_id": "",
"name": "My Project",
"region": "us-east-1",
- "password": ""
+ "db_pass": ""
}'
```
diff --git a/apps/docs/content/guides/auth/social-login/auth-google.mdx b/apps/docs/content/guides/auth/social-login/auth-google.mdx
index aa82bd3dcaf18..bb15372b26bb7 100644
--- a/apps/docs/content/guides/auth/social-login/auth-google.mdx
+++ b/apps/docs/content/guides/auth/social-login/auth-google.mdx
@@ -376,82 +376,72 @@ If you're integrating Google One-Tap with your Next.js application, you can refe
import Script from 'next/script'
import { createClient } from '@/utils/supabase/client'
-import { CredentialResponse } from 'google-one-tap'
+import type { accounts, CredentialResponse } from 'google-one-tap'
import { useRouter } from 'next/navigation'
-import { useEffect } from 'react'
+
+declare const google: { accounts: accounts }
+
+// generate nonce to use for google id token sign-in
+ const generateNonce = async (): Promise<string[]> => {
+ const nonce = btoa(String.fromCharCode(...crypto.getRandomValues(new Uint8Array(32))))
+ const encoder = new TextEncoder()
+ const encodedNonce = encoder.encode(nonce)
+ const hashBuffer = await crypto.subtle.digest('SHA-256', encodedNonce)
+ const hashArray = Array.from(new Uint8Array(hashBuffer))
+ const hashedNonce = hashArray.map((b) => b.toString(16).padStart(2, '0')).join('')
+
+ return [nonce, hashedNonce]
+}
const OneTapComponent = () => {
const supabase = createClient()
const router = useRouter()
- // generate nonce to use for google id token sign-in
- const generateNonce = async (): Promise<string[]> => {
- const nonce = btoa(String.fromCharCode(...crypto.getRandomValues(new Uint8Array(32))))
- const encoder = new TextEncoder()
- const encodedNonce = encoder.encode(nonce)
- const hashBuffer = await crypto.subtle.digest('SHA-256', encodedNonce)
- const hashArray = Array.from(new Uint8Array(hashBuffer))
- const hashedNonce = hashArray.map((b) => b.toString(16).padStart(2, '0')).join('')
+ const initializeGoogleOneTap = async () => {
+ console.log('Initializing Google One Tap')
+ const [nonce, hashedNonce] = await generateNonce()
+ console.log('Nonce: ', nonce, hashedNonce)
- return [nonce, hashedNonce]
- }
+ // check if there's already an existing session before initializing the one-tap UI
+ const { data, error } = await supabase.auth.getSession()
+ if (error) {
+ console.error('Error getting session', error)
+ }
+ if (data.session) {
+ router.push('/')
+ return
+ }
- useEffect(() => {
- const initializeGoogleOneTap = () => {
- console.log('Initializing Google One Tap')
- window.addEventListener('load', async () => {
- const [nonce, hashedNonce] = await generateNonce()
- console.log('Nonce: ', nonce, hashedNonce)
-
- // check if there's already an existing session before initializing the one-tap UI
- const { data, error } = await supabase.auth.getSession()
- if (error) {
- console.error('Error getting session', error)
- }
- if (data.session) {
+ /* global google */
+ google.accounts.id.initialize({
+ client_id: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID,
+ callback: async (response: CredentialResponse) => {
+ try {
+ // send id token returned in response.credential to supabase
+ const { data, error } = await supabase.auth.signInWithIdToken({
+ provider: 'google',
+ token: response.credential,
+ nonce,
+ })
+
+ if (error) throw error
+ console.log('Session data: ', data)
+ console.log('Successfully logged in with Google One Tap')
+
+ // redirect to protected page
router.push('/')
- return
+ } catch (error) {
+ console.error('Error logging in with Google One Tap', error)
}
+ },
+ nonce: hashedNonce,
+ // with chrome's removal of third-party cookies, we need to use FedCM instead (https://developers.google.com/identity/gsi/web/guides/fedcm-migration)
+ use_fedcm_for_prompt: true,
+ })
+ google.accounts.id.prompt() // Display the One Tap UI
+ }
- /* global google */
- google.accounts.id.initialize({
- client_id: process.env.NEXT_PUBLIC_GOOGLE_CLIENT_ID,
- callback: async (response: CredentialResponse) => {
- try {
- // send id token returned in response.credential to supabase
- const { data, error } = await supabase.auth.signInWithIdToken({
- provider: 'google',
- token: response.credential,
- nonce,
- })
-
- if (error) throw error
- console.log('Session data: ', data)
- console.log('Successfully logged in with Google One Tap')
-
- // redirect to protected page
- router.push('/')
- } catch (error) {
- console.error('Error logging in with Google One Tap', error)
- }
- },
- nonce: hashedNonce,
- // with chrome's removal of third-party cookies, we need to use FedCM instead (https://developers.google.com/identity/gsi/web/guides/fedcm-migration)
- use_fedcm_for_prompt: true,
- })
- google.accounts.id.prompt() // Display the One Tap UI
- })
- }
- initializeGoogleOneTap()
- return () => window.removeEventListener('load', initializeGoogleOneTap)
- }, [])
-
- return (
- <>
-
-
- >
- )
+ return
}
export default OneTapComponent
diff --git a/apps/studio/components/interfaces/App/FeaturePreview/FeaturePreviewContext.tsx b/apps/studio/components/interfaces/App/FeaturePreview/FeaturePreviewContext.tsx
index 0ab0b040922d6..2578ca33fa216 100644
--- a/apps/studio/components/interfaces/App/FeaturePreview/FeaturePreviewContext.tsx
+++ b/apps/studio/components/interfaces/App/FeaturePreview/FeaturePreviewContext.tsx
@@ -11,6 +11,7 @@ import {
} from 'react'
import { FeatureFlagContext, LOCAL_STORAGE_KEYS } from 'common'
+import { useSelectedOrganization } from 'hooks/misc/useSelectedOrganization'
import { useFlag, useIsRealtimeSettingsFFEnabled } from 'hooks/ui/useFlag'
import { EMPTY_OBJ } from 'lib/void'
import { FEATURE_PREVIEWS } from './FeaturePreview.constants'
@@ -91,8 +92,12 @@ export const useIsInlineEditorEnabled = () => {
}
export const useUnifiedLogsPreview = () => {
+ const organization = useSelectedOrganization()
const { flags, onUpdateFlag } = useFeaturePreviewContext()
- const isEnabled = flags[LOCAL_STORAGE_KEYS.UI_PREVIEW_UNIFIED_LOGS]
+
+ const isTeamsOrEnterprise = ['team', 'enterprise'].includes(organization?.plan.id ?? '')
+ const isEnabled = isTeamsOrEnterprise && flags[LOCAL_STORAGE_KEYS.UI_PREVIEW_UNIFIED_LOGS]
+
const enable = () => onUpdateFlag(LOCAL_STORAGE_KEYS.UI_PREVIEW_UNIFIED_LOGS, true)
const disable = () => onUpdateFlag(LOCAL_STORAGE_KEYS.UI_PREVIEW_UNIFIED_LOGS, false)
return { isEnabled, enable, disable }
diff --git a/apps/studio/components/interfaces/App/FeaturePreview/UnifiedLogsPreview.tsx b/apps/studio/components/interfaces/App/FeaturePreview/UnifiedLogsPreview.tsx
index 981d98ad42ec5..b757cba8f45a1 100644
--- a/apps/studio/components/interfaces/App/FeaturePreview/UnifiedLogsPreview.tsx
+++ b/apps/studio/components/interfaces/App/FeaturePreview/UnifiedLogsPreview.tsx
@@ -19,6 +19,9 @@ export const UnifiedLogsPreview = () => {
Experience our enhanced logs interface with improved filtering, real-time updates, and a
unified view across all your services. Built for better performance and easier debugging.
+
+ This interface will only be available for organizations on the Team plan or above.
+
Enabling this preview will:
diff --git a/apps/studio/components/interfaces/UnifiedLogs/Queries/ServiceFlowQueries/ServiceFlow.sql.ts b/apps/studio/components/interfaces/UnifiedLogs/Queries/ServiceFlowQueries/ServiceFlow.sql.ts
index 748930dc6e130..d8af15cdb853f 100644
--- a/apps/studio/components/interfaces/UnifiedLogs/Queries/ServiceFlowQueries/ServiceFlow.sql.ts
+++ b/apps/studio/components/interfaces/UnifiedLogs/Queries/ServiceFlowQueries/ServiceFlow.sql.ts
@@ -87,7 +87,6 @@ const getBaseEdgeServiceFlowQuery = (logId: string, serviceType: EdgeServiceType
apikey_payload.issuer = 'supabase' AND
apikey_payload.role IN ('anon', 'service_role')
THEN apikey_payload.role
- WHEN sb_apikey.invalid IS NOT NULL THEN ''
WHEN apikey_payload IS NOT NULL THEN ''
ELSE NULL
END as jwt_key_role,
@@ -120,21 +119,21 @@ const getBaseEdgeServiceFlowQuery = (logId: string, serviceType: EdgeServiceType
-- JWT data
apikey_payload.role as jwt_apikey_role,
apikey_payload.algorithm as jwt_apikey_algorithm,
- null as jwt_apikey_expires_at,
+ apikey_payload.expires_at as jwt_apikey_expires_at,
apikey_payload.issuer as jwt_apikey_issuer,
- null as jwt_apikey_signature_prefix,
+ apikey_payload.signature_prefix as jwt_apikey_signature_prefix,
null as jwt_apikey_key_id,
null as jwt_apikey_session_id,
- null as jwt_apikey_subject,
+ apikey_payload.subject as jwt_apikey_subject,
authorization_payload.role as jwt_auth_role,
authorization_payload.algorithm as jwt_auth_algorithm,
- null as jwt_auth_expires_at,
+ authorization_payload.expires_at as jwt_auth_expires_at,
authorization_payload.issuer as jwt_auth_issuer,
- null as jwt_auth_signature_prefix,
- null as jwt_auth_key_id,
- null as jwt_auth_session_id,
- null as jwt_auth_subject,
+ authorization_payload.signature_prefix as jwt_auth_signature_prefix,
+ authorization_payload.key_id as jwt_auth_key_id,
+ authorization_payload.session_id as jwt_auth_session_id,
+ authorization_payload.subject as jwt_auth_subject,
-- Storage specific data (included for all but only populated for storage)
edge_logs_response_headers.sb_gateway_mode as storage_edge_gateway_mode,
@@ -153,8 +152,8 @@ const getBaseEdgeServiceFlowQuery = (logId: string, serviceType: EdgeServiceType
left join unnest(edge_logs_request.cf) as edge_logs_cf
left join unnest(edge_logs_request.sb) as sb
left join unnest(sb.jwt) as jwt
- left join unnest(COALESCE(jwt.apikey, [])) as sb_apikey
- left join unnest(COALESCE(sb_apikey.payload, [])) as apikey_payload
+ left join unnest(COALESCE(jwt.apikey, [])) as apikey
+ left join unnest(COALESCE(apikey.payload, [])) as apikey_payload
left join unnest(COALESCE(jwt.authorization, [])) as auth
left join unnest(COALESCE(auth.payload, [])) as authorization_payload
left join unnest(COALESCE(sb.apikey, [])) as sb_apikey_outer
@@ -246,7 +245,6 @@ export const getEdgeFunctionServiceFlowQuery = (logId: string): string => {
apikey_payload.issuer = 'supabase' AND
apikey_payload.role IN ('anon', 'service_role')
THEN apikey_payload.role
- WHEN sb_apikey.invalid IS NOT NULL THEN ''
WHEN apikey_payload IS NOT NULL THEN ''
ELSE NULL
END as jwt_key_role,
@@ -273,21 +271,21 @@ export const getEdgeFunctionServiceFlowQuery = (logId: string): string => {
-- JWT data
apikey_payload.role as jwt_apikey_role,
apikey_payload.algorithm as jwt_apikey_algorithm,
- null as jwt_apikey_expires_at,
+ apikey_payload.expires_at as jwt_apikey_expires_at,
apikey_payload.issuer as jwt_apikey_issuer,
- null as jwt_apikey_signature_prefix,
+ apikey_payload.signature_prefix as jwt_apikey_signature_prefix,
null as jwt_apikey_key_id,
null as jwt_apikey_session_id,
- null as jwt_apikey_subject,
+ apikey_payload.subject as jwt_apikey_subject,
authorization_payload.role as jwt_auth_role,
authorization_payload.algorithm as jwt_auth_algorithm,
- null as jwt_auth_expires_at,
+ authorization_payload.expires_at as jwt_auth_expires_at,
authorization_payload.issuer as jwt_auth_issuer,
- null as jwt_auth_signature_prefix,
- null as jwt_auth_key_id,
- null as jwt_auth_session_id,
- null as jwt_auth_subject,
+ authorization_payload.signature_prefix as jwt_auth_signature_prefix,
+ authorization_payload.key_id as jwt_auth_key_id,
+ authorization_payload.session_id as jwt_auth_session_id,
+ authorization_payload.subject as jwt_auth_subject,
-- Function logs aggregation
function_logs_agg.function_log_count as function_log_count,
@@ -305,8 +303,8 @@ export const getEdgeFunctionServiceFlowQuery = (logId: string): string => {
left join unnest(fel_request.headers) as fel_request_headers
left join unnest(fel_request.sb) as sb
left join unnest(sb.jwt) as jwt
- left join unnest(COALESCE(jwt.apikey, [])) as sb_apikey
- left join unnest(COALESCE(sb_apikey.payload, [])) as apikey_payload
+ left join unnest(COALESCE(jwt.apikey, [])) as apikey
+ left join unnest(COALESCE(apikey.payload, [])) as apikey_payload
left join unnest(COALESCE(jwt.authorization, [])) as auth
left join unnest(COALESCE(auth.payload, [])) as authorization_payload
left join unnest(COALESCE(sb.apikey, [])) as sb_apikey_outer
diff --git a/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.fields.tsx b/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.fields.tsx
index b0a2fad0c040d..2b4fca614b74a 100644
--- a/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.fields.tsx
+++ b/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.fields.tsx
@@ -105,7 +105,7 @@ export const filterFields = [
defaultOpen: false,
options: [],
hasDynamicOptions: true,
- hasAsyncSearch: true,
+ hasAsyncSearch: false,
component: (props: Option) => {
return (
@@ -114,25 +114,6 @@ export const filterFields = [
)
},
},
- {
- label: 'Auth User',
- value: 'auth_user',
- type: 'checkbox',
- defaultOpen: false,
- options: [],
- hasDynamicOptions: true,
- hasAsyncSearch: true,
- component: (props: Option) => {
- return (
-
-
-
- {props.value}
-
-
- )
- },
- },
] satisfies DataTableFilterField[]
export const sheetFields = [
diff --git a/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.queries.ts b/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.queries.ts
index 984be6580c83f..da7854653649e 100644
--- a/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.queries.ts
+++ b/apps/studio/components/interfaces/UnifiedLogs/UnifiedLogs.queries.ts
@@ -239,21 +239,14 @@ const getEdgeLogsQuery = () => {
ELSE 'success'
END as level,
edge_logs_request.path as pathname,
- edge_logs_request.host as host,
null as event_message,
edge_logs_request.method as method,
- authorization_payload.role as api_role,
- COALESCE(sb.auth_user, null) as auth_user,
null as log_count,
null as logs
from edge_logs as el
cross join unnest(metadata) as edge_logs_metadata
cross join unnest(edge_logs_metadata.request) as edge_logs_request
cross join unnest(edge_logs_metadata.response) as edge_logs_response
- left join unnest(edge_logs_request.sb) as sb
- left join unnest(sb.jwt) as jwt
- left join unnest(jwt.authorization) as auth
- left join unnest(auth.payload) as authorization_payload
-- ONLY include logs where the path does not include /rest/
WHERE edge_logs_request.path NOT LIKE '%/rest/%'
@@ -279,21 +272,14 @@ const getPostgrestLogsQuery = () => {
ELSE 'success'
END as level,
edge_logs_request.path as pathname,
- edge_logs_request.host as host,
null as event_message,
edge_logs_request.method as method,
- authorization_payload.role as api_role,
- COALESCE(sb.auth_user, null) as auth_user,
null as log_count,
null as logs
from edge_logs as el
cross join unnest(metadata) as edge_logs_metadata
cross join unnest(edge_logs_metadata.request) as edge_logs_request
cross join unnest(edge_logs_metadata.response) as edge_logs_response
- left join unnest(edge_logs_request.sb) as sb
- left join unnest(sb.jwt) as jwt
- left join unnest(jwt.authorization) as auth
- left join unnest(auth.payload) as authorization_payload
-- ONLY include logs where the path includes /rest/
WHERE edge_logs_request.path LIKE '%/rest/%'
@@ -318,11 +304,8 @@ const getPostgresLogsQuery = () => {
ELSE null
END as level,
null as pathname,
- null as host,
event_message as event_message,
null as method,
- 'api_role' as api_role,
- null as auth_user,
null as log_count,
null as logs
from postgres_logs as pgl
@@ -348,21 +331,14 @@ const getEdgeFunctionLogsQuery = () => {
ELSE 'success'
END as level,
fel_request.pathname as pathname,
- fel_request.host as host,
COALESCE(function_logs_agg.last_event_message, '') as event_message,
fel_request.method as method,
- authorization_payload.role as api_role,
- COALESCE(sb.auth_user, null) as auth_user,
function_logs_agg.function_log_count as log_count,
function_logs_agg.logs as logs
from function_edge_logs as fel
cross join unnest(metadata) as fel_metadata
cross join unnest(fel_metadata.response) as fel_response
cross join unnest(fel_metadata.request) as fel_request
- left join unnest(fel_request.sb) as sb
- left join unnest(sb.jwt) as jwt
- left join unnest(jwt.authorization) as auth
- left join unnest(auth.payload) as authorization_payload
left join (
SELECT
fl_metadata.execution_id,
@@ -394,11 +370,8 @@ const getAuthLogsQuery = () => {
ELSE 'success'
END as level,
el_in_al_request.path as pathname,
- el_in_al_request.host as host,
null as event_message,
el_in_al_request.method as method,
- authorization_payload.role as api_role,
- COALESCE(sb.auth_user, null) as auth_user,
null as log_count,
null as logs
from auth_logs as al
@@ -409,10 +382,6 @@ const getAuthLogsQuery = () => {
cross join unnest (el_in_al_metadata.response) as el_in_al_response
cross join unnest (el_in_al_response.headers) as el_in_al_response_headers
cross join unnest (el_in_al_metadata.request) as el_in_al_request
- left join unnest(el_in_al_request.sb) as sb
- left join unnest(sb.jwt) as jwt
- left join unnest(jwt.authorization) as auth
- left join unnest(auth.payload) as authorization_payload
)
on al_metadata.request_id = el_in_al_response_headers.cf_ray
WHERE al_metadata.request_id is not null
@@ -436,21 +405,14 @@ const getSupabaseStorageLogsQuery = () => {
ELSE 'success'
END as level,
edge_logs_request.path as pathname,
- edge_logs_request.host as host,
null as event_message,
edge_logs_request.method as method,
- authorization_payload.role as api_role,
- COALESCE(sb.auth_user, null) as auth_user,
null as log_count,
null as logs
from edge_logs as el
cross join unnest(metadata) as edge_logs_metadata
cross join unnest(edge_logs_metadata.request) as edge_logs_request
cross join unnest(edge_logs_metadata.response) as edge_logs_response
- left join unnest(edge_logs_request.sb) as sb
- left join unnest(sb.jwt) as jwt
- left join unnest(jwt.authorization) as auth
- left join unnest(auth.payload) as authorization_payload
-- ONLY include logs where the path includes /storage/
WHERE edge_logs_request.path LIKE '%/storage/%'
`
@@ -492,11 +454,8 @@ SELECT
status,
level,
pathname,
- host,
event_message,
method,
- api_role,
- auth_user,
log_count,
logs
FROM unified_logs
@@ -562,19 +521,152 @@ ${facet}_count AS (
`.trim()
}
+export const getUnifiedLogsCountCTE = () => {
+ return `
+WITH unified_logs AS (
+ -- Edge logs (non-rest, non-storage)
+ select
+ id,
+ 'edge' as log_type,
+ CAST(edge_logs_response.status_code AS STRING) as status,
+ CASE
+ WHEN edge_logs_response.status_code BETWEEN 200 AND 299 THEN 'success'
+ WHEN edge_logs_response.status_code BETWEEN 400 AND 499 THEN 'warning'
+ WHEN edge_logs_response.status_code >= 500 THEN 'error'
+ ELSE 'success'
+ END as level,
+ edge_logs_request.path as pathname,
+ edge_logs_request.method as method
+ from edge_logs as el
+ cross join unnest(metadata) as edge_logs_metadata
+ cross join unnest(edge_logs_metadata.request) as edge_logs_request
+ cross join unnest(edge_logs_metadata.response) as edge_logs_response
+ WHERE edge_logs_request.path NOT LIKE '%/rest/%'
+ AND edge_logs_request.path NOT LIKE '%/storage/%'
+
+ union all
+
+ -- Postgrest logs
+ select
+ id,
+ 'postgrest' as log_type,
+ CAST(edge_logs_response.status_code AS STRING) as status,
+ CASE
+ WHEN edge_logs_response.status_code BETWEEN 200 AND 299 THEN 'success'
+ WHEN edge_logs_response.status_code BETWEEN 400 AND 499 THEN 'warning'
+ WHEN edge_logs_response.status_code >= 500 THEN 'error'
+ ELSE 'success'
+ END as level,
+ edge_logs_request.path as pathname,
+ edge_logs_request.method as method
+ from edge_logs as el
+ cross join unnest(metadata) as edge_logs_metadata
+ cross join unnest(edge_logs_metadata.request) as edge_logs_request
+ cross join unnest(edge_logs_metadata.response) as edge_logs_response
+ WHERE edge_logs_request.path LIKE '%/rest/%'
+
+ union all
+
+ -- Postgres logs
+ select
+ id,
+ 'postgres' as log_type,
+ CAST(pgl_parsed.sql_state_code AS STRING) as status,
+ CASE
+ WHEN pgl_parsed.error_severity = 'LOG' THEN 'success'
+ WHEN pgl_parsed.error_severity = 'WARNING' THEN 'warning'
+ WHEN pgl_parsed.error_severity = 'FATAL' THEN 'error'
+ WHEN pgl_parsed.error_severity = 'ERROR' THEN 'error'
+ ELSE null
+ END as level,
+ null as pathname,
+ null as method
+ from postgres_logs as pgl
+ cross join unnest(pgl.metadata) as pgl_metadata
+ cross join unnest(pgl_metadata.parsed) as pgl_parsed
+
+ union all
+
+ -- Edge function logs
+ select
+ id,
+ 'edge function' as log_type,
+ CAST(fel_response.status_code AS STRING) as status,
+ CASE
+ WHEN fel_response.status_code BETWEEN 200 AND 299 THEN 'success'
+ WHEN fel_response.status_code BETWEEN 400 AND 499 THEN 'warning'
+ WHEN fel_response.status_code >= 500 THEN 'error'
+ ELSE 'success'
+ END as level,
+ fel_request.pathname as pathname,
+ fel_request.method as method
+ from function_edge_logs as fel
+ cross join unnest(metadata) as fel_metadata
+ cross join unnest(fel_metadata.response) as fel_response
+ cross join unnest(fel_metadata.request) as fel_request
+
+ union all
+
+ -- Auth logs
+ select
+ al.id as id,
+ 'auth' as log_type,
+ CAST(el_in_al_response.status_code AS STRING) as status,
+ CASE
+ WHEN el_in_al_response.status_code BETWEEN 200 AND 299 THEN 'success'
+ WHEN el_in_al_response.status_code BETWEEN 400 AND 499 THEN 'warning'
+ WHEN el_in_al_response.status_code >= 500 THEN 'error'
+ ELSE 'success'
+ END as level,
+ el_in_al_request.path as pathname,
+ el_in_al_request.method as method
+ from auth_logs as al
+ cross join unnest(metadata) as al_metadata
+ left join (
+ edge_logs as el_in_al
+ cross join unnest (metadata) as el_in_al_metadata
+ cross join unnest (el_in_al_metadata.response) as el_in_al_response
+ cross join unnest (el_in_al_response.headers) as el_in_al_response_headers
+ cross join unnest (el_in_al_metadata.request) as el_in_al_request
+ )
+ on al_metadata.request_id = el_in_al_response_headers.cf_ray
+ WHERE al_metadata.request_id is not null
+
+ union all
+
+ -- Storage logs
+ select
+ id,
+ 'storage' as log_type,
+ CAST(edge_logs_response.status_code AS STRING) as status,
+ CASE
+ WHEN edge_logs_response.status_code BETWEEN 200 AND 299 THEN 'success'
+ WHEN edge_logs_response.status_code BETWEEN 400 AND 499 THEN 'warning'
+ WHEN edge_logs_response.status_code >= 500 THEN 'error'
+ ELSE 'success'
+ END as level,
+ edge_logs_request.path as pathname,
+ edge_logs_request.method as method
+ from edge_logs as el
+ cross join unnest(metadata) as edge_logs_metadata
+ cross join unnest(edge_logs_metadata.request) as edge_logs_request
+ cross join unnest(edge_logs_metadata.response) as edge_logs_response
+ WHERE edge_logs_request.path LIKE '%/storage/%'
+)
+ `
+}
+
export const getLogsCountQuery = (search: QuerySearchParamsType): string => {
const { finalWhere } = buildQueryConditions(search)
// Create a count query using the same unified logs CTE
const sql = `
-${getUnifiedLogsCTE()},
+${getUnifiedLogsCountCTE()},
${getFacetCountCTE({ search, facet: 'log_type' })},
${getFacetCountCTE({ search, facet: 'method' })},
${getFacetCountCTE({ search, facet: 'level' })},
${getFacetCountCTE({ search, facet: 'status' })},
-${getFacetCountCTE({ search, facet: 'host' })},
-${getFacetCountCTE({ search, facet: 'pathname' })},
-${getFacetCountCTE({ search, facet: 'auth_user' })}
+${getFacetCountCTE({ search, facet: 'pathname' })}
-- Get total count
SELECT 'total' as dimension, 'all' as value, COUNT(*) as count
@@ -603,18 +695,9 @@ SELECT dimension, value, count from status_count
UNION ALL
--- Get counts by host (exclude host filter to avoid self-filtering)
-SELECT dimension, value, count from host_count
-
-UNION ALL
-
-- Get counts by pathname (exclude pathname filter to avoid self-filtering)
SELECT dimension, value, count from pathname_count
-UNION ALL
-
--- Get counts by auth_user (exclude auth_user filter to avoid self-filtering)
-SELECT dimension, value, count from auth_user_count
`
return sql
diff --git a/apps/studio/components/layouts/LogsLayout/LogsSidebarMenuV2.tsx b/apps/studio/components/layouts/LogsLayout/LogsSidebarMenuV2.tsx
index b9f02d6b4a0a1..8f6d7571d4722 100644
--- a/apps/studio/components/layouts/LogsLayout/LogsSidebarMenuV2.tsx
+++ b/apps/studio/components/layouts/LogsLayout/LogsSidebarMenuV2.tsx
@@ -91,7 +91,7 @@ export function LogsSidebarMenuV2() {
const { ref } = useParams() as { ref: string }
const warehouseEnabled = useFlag('warehouse')
- const isUnifiedLogsPreviewAvailable = useFlag('unifiedLogs')
+ const unifiedLogsFlagEnabled = useFlag('unifiedLogs')
const { selectFeaturePreview } = useFeaturePreviewModal()
const { enable: enableUnifiedLogs } = useUnifiedLogsPreview()
@@ -115,6 +115,11 @@ export function LogsSidebarMenuV2() {
const { plan: orgPlan, isLoading: isOrgPlanLoading } = useCurrentOrgPlan()
const isFreePlan = !isOrgPlanLoading && orgPlan?.id === 'free'
+ const isUnifiedLogsPreviewAvailable =
+ unifiedLogsFlagEnabled &&
+ !isOrgPlanLoading &&
+ ['team', 'enterprise'].includes(orgPlan?.id ?? '')
+
const { data: savedQueriesRes, isLoading: savedQueriesLoading } = useContentQuery({
projectRef: ref,
type: 'log_sql',
@@ -224,6 +229,19 @@ export function LogsSidebarMenuV2() {
return (
+ Coming soon}
+ title="New logs"
+ description="Get early access"
+ actions={
+
+
+
+ }
+ />
{isUnifiedLogsPreviewAvailable && (
getFilterValue({ value, search, keywords, currentWord })
}
+ onKeyDown={(e) => {
+ // Stop arrow key navigation from propagating to parent components
+ if (e.key === 'ArrowUp' || e.key === 'ArrowDown') {
+ e.stopPropagation()
+ }
+ }}
>
(
({ signal }) => getUnifiedLogInspection({ projectRef, logId, type, search }, signal),
{
enabled: enabled && typeof projectRef !== 'undefined',
+ ...UNIFIED_LOGS_QUERY_OPTIONS,
...options,
}
)
diff --git a/apps/studio/lib/ai/tool-filter.test.ts b/apps/studio/lib/ai/tool-filter.test.ts
index 03b12fe66f365..4ad47d1cf0699 100644
--- a/apps/studio/lib/ai/tool-filter.test.ts
+++ b/apps/studio/lib/ai/tool-filter.test.ts
@@ -9,14 +9,12 @@ import {
createPrivacyMessageTool,
toolSetValidationSchema,
transformToolResult,
- DatabaseExtension,
} from './tool-filter'
describe('TOOL_CATEGORY_MAP', () => {
it('should categorize tools correctly', () => {
expect(TOOL_CATEGORY_MAP['display_query']).toBe(TOOL_CATEGORIES.UI)
expect(TOOL_CATEGORY_MAP['list_tables']).toBe(TOOL_CATEGORIES.SCHEMA)
- expect(TOOL_CATEGORY_MAP['get_logs']).toBe(TOOL_CATEGORIES.LOG)
})
})
@@ -35,9 +33,7 @@ describe('tool allowance by opt-in level', () => {
list_edge_functions: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
list_branches: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
// Log tools
- get_logs: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
get_advisors: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
- get_log_counts: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
} as unknown as ToolSet
const filtered = filterToolsByOptInLevel(mockTools, optInLevel as any)
@@ -64,7 +60,6 @@ describe('tool allowance by opt-in level', () => {
expect(tools).not.toContain('list_extensions')
expect(tools).not.toContain('list_edge_functions')
expect(tools).not.toContain('list_branches')
- expect(tools).not.toContain('get_logs')
expect(tools).not.toContain('execute_sql')
})
@@ -78,9 +73,7 @@ describe('tool allowance by opt-in level', () => {
expect(tools).toContain('list_edge_functions')
expect(tools).toContain('list_branches')
expect(tools).toContain('search_docs')
- expect(tools).not.toContain('get_logs')
expect(tools).not.toContain('get_advisors')
- expect(tools).not.toContain('get_log_counts')
expect(tools).not.toContain('execute_sql')
})
@@ -94,9 +87,7 @@ describe('tool allowance by opt-in level', () => {
expect(tools).toContain('list_edge_functions')
expect(tools).toContain('list_branches')
expect(tools).toContain('search_docs')
- expect(tools).toContain('get_logs')
expect(tools).toContain('get_advisors')
- expect(tools).toContain('get_log_counts')
expect(tools).not.toContain('execute_sql')
})
@@ -110,9 +101,7 @@ describe('tool allowance by opt-in level', () => {
expect(tools).toContain('list_edge_functions')
expect(tools).toContain('list_branches')
expect(tools).toContain('search_docs')
- expect(tools).toContain('get_logs')
expect(tools).toContain('get_advisors')
- expect(tools).toContain('get_log_counts')
expect(tools).not.toContain('execute_sql')
})
})
@@ -130,9 +119,7 @@ describe('filterToolsByOptInLevel', () => {
list_branches: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
search_docs: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
// Log tools
- get_logs: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
get_advisors: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
- get_log_counts: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
// Unknown tool - should be filtered out entirely
some_other_tool: { execute: vitest.fn().mockResolvedValue({ status: 'success' }) },
} as unknown as ToolSet
@@ -186,9 +173,7 @@ describe('filterToolsByOptInLevel', () => {
'list_extensions',
'list_edge_functions',
'list_branches',
- 'get_logs',
'get_advisors',
- 'get_log_counts',
])
})
@@ -200,16 +185,14 @@ describe('filterToolsByOptInLevel', () => {
'list_extensions',
'list_edge_functions',
'list_branches',
- 'get_logs',
'get_advisors',
- 'get_log_counts',
])
})
it('should stub log tools for schema opt-in level', async () => {
const tools = filterToolsByOptInLevel(mockTools, 'schema')
- await expectStubsFor(tools, ['get_logs', 'get_advisors', 'get_log_counts'])
+ await expectStubsFor(tools, ['get_advisors'])
})
// No execute_sql tool, so nothing additional to stub for schema_and_log opt-in level
@@ -293,7 +276,6 @@ describe('toolSetValidationSchema', () => {
it('should accept subset of known tools', () => {
const validSubset = {
list_tables: { parameters: z.object({}), execute: vitest.fn() },
- get_logs: { parameters: z.object({}), execute: vitest.fn() },
display_query: { parameters: z.object({}), execute: vitest.fn() },
}
@@ -327,13 +309,11 @@ describe('toolSetValidationSchema', () => {
list_extensions: { parameters: z.object({}), execute: vitest.fn() },
list_edge_functions: { parameters: z.object({}), execute: vitest.fn() },
list_branches: { parameters: z.object({}), execute: vitest.fn() },
- get_logs: { parameters: z.object({}), execute: vitest.fn() },
search_docs: { parameters: z.object({}), execute: vitest.fn() },
get_advisors: { parameters: z.object({}), execute: vitest.fn() },
display_query: { parameters: z.object({}), execute: vitest.fn() },
display_edge_function: { parameters: z.object({}), execute: vitest.fn() },
rename_chat: { parameters: z.object({}), execute: vitest.fn() },
- get_log_counts: { parameters: z.object({}), execute: vitest.fn() },
}
const validationResult = toolSetValidationSchema.safeParse(allExpectedTools)
diff --git a/apps/studio/lib/ai/tool-filter.ts b/apps/studio/lib/ai/tool-filter.ts
index e16e52b27f3af..ac07b32bb100e 100644
--- a/apps/studio/lib/ai/tool-filter.ts
+++ b/apps/studio/lib/ai/tool-filter.ts
@@ -23,7 +23,6 @@ export const toolSetValidationSchema = z.record(
'list_extensions',
'list_edge_functions',
'list_branches',
- 'get_logs',
'search_docs',
'get_advisors',
@@ -31,7 +30,6 @@ export const toolSetValidationSchema = z.record(
'display_query',
'display_edge_function',
'rename_chat',
- 'get_log_counts',
'getSchemaTables',
'getRlsKnowledge',
'getFunctions',
@@ -98,9 +96,7 @@ export const TOOL_CATEGORY_MAP: Record = {
list_branches: TOOL_CATEGORIES.SCHEMA,
// Log tools - MCP and local
- get_logs: TOOL_CATEGORIES.LOG,
get_advisors: TOOL_CATEGORIES.LOG,
- get_log_counts: TOOL_CATEGORIES.LOG,
}
/**
diff --git a/apps/studio/pages/api/ai/sql/generate-v4.ts b/apps/studio/pages/api/ai/sql/generate-v4.ts
index 1ff811f27819a..989cbaf554831 100644
--- a/apps/studio/pages/api/ai/sql/generate-v4.ts
+++ b/apps/studio/pages/api/ai/sql/generate-v4.ts
@@ -12,9 +12,6 @@ import { getAiOptInLevel } from 'hooks/misc/useOrgOptedIntoAi'
import { getModel } from 'lib/ai/model'
import apiWrapper from 'lib/api/apiWrapper'
import { queryPgMetaSelfHosted } from 'lib/self-hosted'
-import { getUnifiedLogsChart } from 'data/logs/unified-logs-chart-query'
-import { getUnifiedLogs } from 'data/logs/unified-logs-infinite-query'
-import { QuerySearchParamsType } from 'components/interfaces/UnifiedLogs/UnifiedLogs.types'
import { createSupabaseMCPClient } from 'lib/ai/supabase-mcp'
import { filterToolsByOptInLevel, toolSetValidationSchema } from 'lib/ai/tool-filter'
import { getTools } from './tools'
@@ -170,188 +167,6 @@ async function handlePost(req: NextApiRequest, res: NextApiResponse) {
return { status: 'Tool call sent to client for rendering.' }
},
}),
- get_log_counts: tool({
- description:
- 'Get log counts aggregated by time buckets to understand system health and activity levels. Returns success, warning, and error counts over time. Can filter by log types (edge, auth, postgres, etc.) and levels.',
- parameters: z.object({
- dateStart: z
- .string()
- .optional()
- .describe('Start date as ISO string (defaults to 1 hour ago)'),
- dateEnd: z.string().optional().describe('End date as ISO string (defaults to now)'),
- level: z
- .array(z.enum(['success', 'warning', 'error']))
- .optional()
- .describe('Filter by log levels'),
- log_type: z
- .array(
- z.enum([
- 'postgres',
- 'edge_function',
- 'auth',
- 'postgrest',
- 'storage',
- 'edge',
- 'function_events',
- 'postgres_upgrade',
- 'supavisor',
- ])
- )
- .optional()
- .describe('Filter by log types (e.g., ["edge"] for edge logs only)'),
- }),
- execute: async (args) => {
- try {
- let dateArray: Date[] | null = null
- if (args.dateStart && args.dateEnd) {
- dateArray = [new Date(args.dateStart), new Date(args.dateEnd)]
- }
-
- const search: QuerySearchParamsType = {
- date: dateArray,
- level: args.level || null,
- log_type: args.log_type || null,
- latency: null,
- 'timing.dns': null,
- 'timing.connection': null,
- 'timing.tls': null,
- 'timing.ttfb': null,
- 'timing.transfer': null,
- status: null,
- regions: null,
- method: null,
- host: null,
- pathname: null,
- sort: null,
- size: 40,
- start: 0,
- direction: 'next',
- cursor: new Date(),
- id: null,
- }
-
- let headers = new Headers()
- if (authorization) headers.set('Authorization', authorization)
-
- const chartData = await getUnifiedLogsChart({ projectRef, search }, undefined, headers)
-
- const totalSuccess = chartData.reduce((sum: number, point) => sum + point.success, 0)
- const totalWarning = chartData.reduce((sum: number, point) => sum + point.warning, 0)
- const totalError = chartData.reduce((sum: number, point) => sum + point.error, 0)
-
- return {
- status: 'success',
- data: chartData,
- summary: `Found ${chartData.length} time buckets. Total: ${totalSuccess} success, ${totalWarning} warning, ${totalError} error logs`,
- totals: {
- success: totalSuccess,
- warning: totalWarning,
- error: totalError,
- },
- }
- } catch (error) {
- return {
- status: 'error',
- error: error instanceof Error ? error.message : 'Unknown error occurred',
- }
- }
- },
- }),
- get_logs: tool({
- description:
- 'Get detailed log entries for analysis and debugging. Use this after get_log_counts to examine specific logs during time periods with errors or unusual activity.',
- parameters: z.object({
- dateStart: z
- .string()
- .optional()
- .describe('Start date as ISO string (defaults to 1 hour ago)'),
- dateEnd: z.string().optional().describe('End date as ISO string (defaults to now)'),
- level: z
- .array(z.enum(['success', 'warning', 'error']))
- .optional()
- .describe('Filter by log levels'),
- log_type: z
- .array(
- z.enum([
- 'postgres',
- 'edge_function',
- 'auth',
- 'postgrest',
- 'storage',
- 'edge',
- 'function_events',
- 'postgres_upgrade',
- 'supavisor',
- ])
- )
- .optional()
- .describe('Filter by log types'),
- limit: z
- .number()
- .min(1)
- .max(20)
- .default(10)
- .describe('Maximum number of logs to return (1-100, defaults to 20)'),
- }),
- execute: async (args) => {
- try {
- let dateArray: Date[] | null = null
- if (args.dateStart && args.dateEnd) {
- dateArray = [new Date(args.dateStart), new Date(args.dateEnd)]
- }
-
- const search: QuerySearchParamsType = {
- date: dateArray,
- level: args.level || null,
- log_type: args.log_type || null,
- latency: null,
- 'timing.dns': null,
- 'timing.connection': null,
- 'timing.tls': null,
- 'timing.ttfb': null,
- 'timing.transfer': null,
- status: null,
- regions: null,
- method: null,
- host: null,
- pathname: null,
- sort: null,
- size: 40,
- start: 0,
- direction: 'next',
- cursor: new Date(),
- id: null,
- }
-
- let headers = new Headers()
- if (authorization) headers.set('Authorization', authorization)
-
- const logsData = await getUnifiedLogs(
- {
- projectRef,
- search,
- },
- undefined,
- headers
- )
-
- const logs = logsData.data.slice(0, args.limit)
-
- return {
- status: 'success',
- data: logs,
- summary: `Found ${logs.length} log entries. Showing details for analysis.`,
- totalFetched: logs.length,
- hasMore: logsData.nextCursor !== null,
- }
- } catch (error) {
- return {
- status: 'error',
- error: error instanceof Error ? error.message : 'Unknown error occurred',
- }
- }
- },
- }),
rename_chat: tool({
description: `Rename the current chat session when the current chat name doesn't describe the conversation topic.`,
parameters: z.object({
@@ -474,15 +289,6 @@ async function handlePost(req: NextApiRequest, res: NextApiResponse) {
- **If \`execute_sql\` is NOT available**: Explain the issue and provide the corrected SQL using \`display_query\` with \`sql\`, \`label\`, and \`runQuery: true\`. Include \`view\`, \`xAxis\`, \`yAxis\` if the corrected query might return visualizable data.
- **If debugging a WRITE/DDL query**: Explain the issue and provide the corrected SQL using \`display_query\` with \`sql\`, \`label\`, and \`runQuery: false\`. Include \`view\`, \`xAxis\`, \`yAxis\` if the corrected query might return visualizable data.
- # Supabase Health & Debugging
- - **General Status**:
- - **If \`get_log_counts\`, \`get_logs\`, \`list_tables\`, \`list_extensions\` are available**: Use \`get_log_counts\` first to get a high-level view of system health by checking success/warning/error counts over time. You can filter by specific log types (e.g., \`log_type: ["edge"]\` for edge logs, \`log_type: ["postgres"]\` for database logs). Then use \`get_logs\` and schema tools to provide a detailed summary overview of the project's health (check recent errors/activity for relevant services like 'postgres', 'api', 'auth').
- - **If tools are NOT available**: Ask the user to check their Supabase dashboard or logs for project health information.
- - **Service Errors**:
- - **If \`get_log_counts\` and \`get_logs\` are available**: Start with \`get_log_counts\` to understand the overall error patterns and timeframes. Use log type filtering to focus on specific services (e.g., \`log_type: ["edge"]\` for API errors, \`log_type: ["postgres"]\` for database errors). Then use \`get_logs\` to dive deeper into specific errors. When facing specific errors related to the database, Edge Functions, or other Supabase services, explain the problem and use the \`get_logs\` tool, specifying the relevant service type (e.g., 'postgres', 'edge functions', 'api') to retrieve logs and diagnose the issue. Briefly summarize the relevant log information in your text response before suggesting a fix.
- - **If only \`get_logs\` is available**: Use the \`get_logs\` tool directly to retrieve logs for the service experiencing errors.
- - **If tools are NOT available**: Ask the user to provide relevant logs for the service experiencing errors.
-
# SQL Style:
- Generated SQL must be valid Postgres SQL.
- Always use double apostrophes for escaped single quotes (e.g., 'Night''s watch').
diff --git a/apps/studio/pages/project/[ref]/logs/index.tsx b/apps/studio/pages/project/[ref]/logs/index.tsx
index 92ac4e81db75e..8454db4cfaae9 100644
--- a/apps/studio/pages/project/[ref]/logs/index.tsx
+++ b/apps/studio/pages/project/[ref]/logs/index.tsx
@@ -8,12 +8,15 @@ import DefaultLayout from 'components/layouts/DefaultLayout'
import LogsLayout from 'components/layouts/LogsLayout/LogsLayout'
import ProjectLayout from 'components/layouts/ProjectLayout/ProjectLayout'
import { useLocalStorageQuery } from 'hooks/misc/useLocalStorage'
+import { useSelectedOrganization } from 'hooks/misc/useSelectedOrganization'
import type { NextPageWithLayout } from 'types'
export const LogPage: NextPageWithLayout = () => {
const router = useRouter()
const { ref } = useParams()
const { hasLoaded } = useContext(FeatureFlagContext)
+
+ const org = useSelectedOrganization()
const { isEnabled: isUnifiedLogsEnabled } = useUnifiedLogsPreview()
const [lastVisitedLogsPage] = useLocalStorageQuery(
@@ -22,10 +25,10 @@ export const LogPage: NextPageWithLayout = () => {
)
useEffect(() => {
- if (hasLoaded && !isUnifiedLogsEnabled) {
+ if (hasLoaded && !!org && !isUnifiedLogsEnabled) {
router.replace(`/project/${ref}/logs/${lastVisitedLogsPage}`)
}
- }, [router, hasLoaded, lastVisitedLogsPage, ref, isUnifiedLogsEnabled])
+ }, [router, hasLoaded, org, lastVisitedLogsPage, ref, isUnifiedLogsEnabled])
// Handle redirects when unified logs preview flag changes
useEffect(() => {
diff --git a/apps/www/_blog/2025-03-29-postgres-language-server.mdx b/apps/www/_blog/2025-03-29-postgres-language-server.mdx
index ece076ee69761..9ee33ba33dbb4 100644
--- a/apps/www/_blog/2025-03-29-postgres-language-server.mdx
+++ b/apps/www/_blog/2025-03-29-postgres-language-server.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: philipp,julian
image: lw14-postgres-language-server/postgres-language-server-og.png
thumb: lw14-postgres-language-server/postgres-language-server-thumb.png
-launchweek: 14
+launchweek: '14'
---
Today we’re announcing the initial release of Postgres Language Server - a Language Server Protocol (LSP) implementation for Postgres and a collection of language tools focusing on reliable SQL tooling and developer experience.
diff --git a/apps/www/_blog/2025-03-31-clerk-tpa-pricing.mdx b/apps/www/_blog/2025-03-31-clerk-tpa-pricing.mdx
index d6536864eaa7b..0c8158855ef73 100644
--- a/apps/www/_blog/2025-03-31-clerk-tpa-pricing.mdx
+++ b/apps/www/_blog/2025-03-31-clerk-tpa-pricing.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: stojan
image: lw14-clerk-tpa-pricing/clerk-tpa-pricing-og.png
thumb: lw14-clerk-tpa-pricing/clerk-tpa-pricing-thumb.png
-launchweek: 14
+launchweek: '14'
---
Today we're expanding our official Third-party Auth integrations to include [Clerk](https://clerk.com).
diff --git a/apps/www/_blog/2025-03-31-supabase-ui-library.mdx b/apps/www/_blog/2025-03-31-supabase-ui-library.mdx
index e6d6ff9fe3d5a..13f356ed052e8 100644
--- a/apps/www/_blog/2025-03-31-supabase-ui-library.mdx
+++ b/apps/www/_blog/2025-03-31-supabase-ui-library.mdx
@@ -11,7 +11,7 @@ tags:
- design
date: '2025-03-31T00:00:01'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
We're excited to release an [official Supabase UI Library](https://supabase.com/ui)—a collection of ready-to-use components built on top of [shadcn/ui](https://ui.shadcn.com/). Designed for flexibility, these components can be dropped into any Next.js, React Router, TanStack Start, or plain React app.
diff --git a/apps/www/_blog/2025-04-01-automatic-embeddings.mdx b/apps/www/_blog/2025-04-01-automatic-embeddings.mdx
index 937ee7f01e216..d67ea05fa68a6 100644
--- a/apps/www/_blog/2025-04-01-automatic-embeddings.mdx
+++ b/apps/www/_blog/2025-04-01-automatic-embeddings.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: gregnr
image: lw14-automatic-embeddings/automatic-embeddings-og.png
thumb: lw14-automatic-embeddings/automatic-embeddings-thumb.png
-launchweek: 14
+launchweek: '14'
---
Today we’re releasing [automatic embeddings](/docs/guides/ai/automatic-embeddings) - automate embedding generation and updates using Supabase [Vector](/modules/vector), [Queues](/modules/queues), [Cron](/modules/cron), and [pg_net](/docs/guides/database/extensions/pg_net) extension, and [Edge Functions](/edge-functions).
diff --git a/apps/www/_blog/2025-04-01-supabase-edge-functions-deploy-dashboard-deno-2-1.mdx b/apps/www/_blog/2025-04-01-supabase-edge-functions-deploy-dashboard-deno-2-1.mdx
index af6e7c249f35b..d941752723b4f 100644
--- a/apps/www/_blog/2025-04-01-supabase-edge-functions-deploy-dashboard-deno-2-1.mdx
+++ b/apps/www/_blog/2025-04-01-supabase-edge-functions-deploy-dashboard-deno-2-1.mdx
@@ -12,7 +12,7 @@ tags:
- functions
date: '2025-04-01T00:00:01'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
Now you can create, test, edit, and deploy Edge Functions directly from the Supabase Dashboard. We're also releasing Deno 2.1 Preview today but more on that later.
diff --git a/apps/www/_blog/2025-04-02-realtime-broadcast-from-database.mdx b/apps/www/_blog/2025-04-02-realtime-broadcast-from-database.mdx
index 39938221d9c2c..7c6a0f2b0b186 100644
--- a/apps/www/_blog/2025-04-02-realtime-broadcast-from-database.mdx
+++ b/apps/www/_blog/2025-04-02-realtime-broadcast-from-database.mdx
@@ -12,7 +12,7 @@ tags:
- realtime
date: '2025-04-02T00:00:01'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
Now you can use Realtime Broadcast to scale database changes sent to clients with [Broadcast from Database](/docs/guides/realtime/broadcast#broadcast-from-the-database).
diff --git a/apps/www/_blog/2025-04-02-tabs-dashboard-updates.mdx b/apps/www/_blog/2025-04-02-tabs-dashboard-updates.mdx
index d13db1769dd69..d1f252d00e22f 100644
--- a/apps/www/_blog/2025-04-02-tabs-dashboard-updates.mdx
+++ b/apps/www/_blog/2025-04-02-tabs-dashboard-updates.mdx
@@ -11,7 +11,7 @@ tags:
- studio
date: '2025-04-02T00:00:00'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
We've had a busy few months working on Studio improvements and new features—big and small—to help you build, debug, and ship faster.
diff --git a/apps/www/_blog/2025-04-03-declarative-schemas.mdx b/apps/www/_blog/2025-04-03-declarative-schemas.mdx
index 5c51298947a20..1294aebe09668 100644
--- a/apps/www/_blog/2025-04-03-declarative-schemas.mdx
+++ b/apps/www/_blog/2025-04-03-declarative-schemas.mdx
@@ -11,7 +11,7 @@ tags:
- launch-week
date: '2025-04-03T00:00:00'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
Today we’re releasing declarative schemas to simplify managing and maintaining complex database schemas. With declarative schemas, you can define your database structure in a clear, centralized, and version-controlled manner.
diff --git a/apps/www/_blog/2025-04-04-data-api-nearest-read-replica.mdx b/apps/www/_blog/2025-04-04-data-api-nearest-read-replica.mdx
index f4853d55105b9..c657550e13808 100644
--- a/apps/www/_blog/2025-04-04-data-api-nearest-read-replica.mdx
+++ b/apps/www/_blog/2025-04-04-data-api-nearest-read-replica.mdx
@@ -12,7 +12,7 @@ tags:
- postgrest
date: '2025-04-04T00:00:00'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
Today we’re releasing Data API requests routing to the nearest Read Replica by extending our [API load balancer](/docs/guides/platform/read-replicas#api-load-balancer) to handle geo-routing.
diff --git a/apps/www/_blog/2025-04-04-mcp-server.mdx b/apps/www/_blog/2025-04-04-mcp-server.mdx
index 7d0556d18169c..e2e5dd88f088c 100644
--- a/apps/www/_blog/2025-04-04-mcp-server.mdx
+++ b/apps/www/_blog/2025-04-04-mcp-server.mdx
@@ -10,7 +10,7 @@ tags:
- launch-week
date: '2025-04-04T00:00:01'
toc_depth: 3
-launchweek: 14
+launchweek: '14'
---
We are launching an official [Supabase MCP server](https://github.com/supabase-community/supabase-mcp). You can use this server to connect your favorite AI tools (such as [Cursor](https://www.cursor.com/) or [Claude](https://claude.ai/download)) directly with Supabase.
diff --git a/apps/www/_blog/2025-07-14-jwt-signing-keys.mdx b/apps/www/_blog/2025-07-14-jwt-signing-keys.mdx
index c044c13cabd55..168a61242e0a5 100644
--- a/apps/www/_blog/2025-07-14-jwt-signing-keys.mdx
+++ b/apps/www/_blog/2025-07-14-jwt-signing-keys.mdx
@@ -11,7 +11,7 @@ toc_depth: 3
author: stojan
image: launch-week-15/day-1-jwt-signing-keys/og.jpg
thumb: launch-week-15/day-1-jwt-signing-keys/thumb.jpg
-launchweek: 15
+launchweek: '15'
---
Today we're announcing some long awaited changes in Supabase:
diff --git a/apps/www/_blog/2025-07-14-supabase-ui-platform-kit.mdx b/apps/www/_blog/2025-07-14-supabase-ui-platform-kit.mdx
index 6a4b5962af726..d8dd21a88c0d4 100644
--- a/apps/www/_blog/2025-07-14-supabase-ui-platform-kit.mdx
+++ b/apps/www/_blog/2025-07-14-supabase-ui-platform-kit.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: saxon_fletcher
image: launch-week-15/day-1-ui-platform-kit/og.jpg
thumb: launch-week-15/day-1-ui-platform-kit/thumb.png
-launchweek: 15
+launchweek: '15'
---
Today we’re releasing our Platform Kit, new components in the Supabase UI Library that makes it incredibly easy to build platforms on top of Supabase.
diff --git a/apps/www/_blog/2025-07-15-analytics-buckets.mdx b/apps/www/_blog/2025-07-15-analytics-buckets.mdx
index 143ab2af2890d..20e163ea139ac 100644
--- a/apps/www/_blog/2025-07-15-analytics-buckets.mdx
+++ b/apps/www/_blog/2025-07-15-analytics-buckets.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: oli_rice,fabrizio
image: launch-week-15/day-2-analytics-buckets/og.jpg
thumb: launch-week-15/day-2-analytics-buckets/thumb.png
-launchweek: 15
+launchweek: '15'
---
Today we're launching **Supabase Analytics Buckets** in private alpha. These are a new kind of storage bucket optimized for analytics, with built-in support for the [Apache Iceberg](https://iceberg.apache.org/) table format.
diff --git a/apps/www/_blog/2025-07-15-figma-make-support-for-supabase.mdx b/apps/www/_blog/2025-07-15-figma-make-support-for-supabase.mdx
index ddde523d2c007..83f928c08e368 100644
--- a/apps/www/_blog/2025-07-15-figma-make-support-for-supabase.mdx
+++ b/apps/www/_blog/2025-07-15-figma-make-support-for-supabase.mdx
@@ -11,7 +11,7 @@ toc_depth: 2
author: prashant
image: launch-week-15/day-2-figma-make/og.jpg
thumb: launch-week-15/day-2-figma-make/thumb.png
-launchweek: 15
+launchweek: '15'
---
With [today’s update to Figma Make](https://www.figma.com/make), you can now build **richer prototypes and fully functional web applications**, complete with real backend logic using Supabase—all without leaving Figma.
diff --git a/apps/www/_blog/2025-07-15-stripe-engine-as-sync-library.mdx b/apps/www/_blog/2025-07-15-stripe-engine-as-sync-library.mdx
index 8f4d964139115..9fabde9dc13cd 100644
--- a/apps/www/_blog/2025-07-15-stripe-engine-as-sync-library.mdx
+++ b/apps/www/_blog/2025-07-15-stripe-engine-as-sync-library.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: kevcodez
image: launch-week-15/day-2-stripe-engine/og.jpg
thumb: launch-week-15/day-2-stripe-engine/thumb.png
-launchweek: 15
+launchweek: '15'
---
We're excited to announce that [`stripe-sync-engine`](https://github.com/supabase/stripe-sync-engine) is now available as a standalone npm package: [`@supabase/stripe-sync-engine`](https://www.npmjs.com/package/@supabase/stripe-sync-engine)!
diff --git a/apps/www/_blog/2025-07-16-branching-2-0.mdx b/apps/www/_blog/2025-07-16-branching-2-0.mdx
index 22e85de909b54..3d1342e0d0d42 100644
--- a/apps/www/_blog/2025-07-16-branching-2-0.mdx
+++ b/apps/www/_blog/2025-07-16-branching-2-0.mdx
@@ -12,7 +12,7 @@ toc_depth: 3
author: saxon_fletcher
image: launch-week-15/day-3-branching-2-0/og.jpg
thumb: launch-week-15/day-3-branching-2-0/thumb.png
-launchweek: 15
+launchweek: '15'
---
Branching has been a part of Supabase for some time now, a way for you to experiment or build out new features without affecting your production environment. It requires you to connect your Supabase project to a GitHub repository which automates many parts of the workflow, but this also alienates those who prefer not to use Git. Today, we are announcing Branching 2.0 which removes the Git requirement and makes it super simple to spin up new branches.
diff --git a/apps/www/_blog/2025-07-16-improved-security-controls.mdx b/apps/www/_blog/2025-07-16-improved-security-controls.mdx
index 47539eaaca456..0f23203c6596b 100644
--- a/apps/www/_blog/2025-07-16-improved-security-controls.mdx
+++ b/apps/www/_blog/2025-07-16-improved-security-controls.mdx
@@ -13,7 +13,7 @@ toc_depth: 3
author: staaldraad,hieu,filipe
image: launch-week-15/day-3-security-controls/og.jpg
thumb: launch-week-15/day-3-security-controls/thumb.png
-launchweek: 15
+launchweek: '15'
---
Today we are launching the foundations of several security features we plan to build on in the upcoming months.
diff --git a/apps/www/_blog/2025-07-17-algolia-connector-for-supabase.mdx b/apps/www/_blog/2025-07-17-algolia-connector-for-supabase.mdx
new file mode 100644
index 0000000000000..c0f0b33bd3575
--- /dev/null
+++ b/apps/www/_blog/2025-07-17-algolia-connector-for-supabase.mdx
@@ -0,0 +1,111 @@
+---
+title: 'Algolia Connector for Supabase'
+description: 'Bring lightning-fast search to your Supabase apps, with no code required.'
+categories:
+ - launch-week
+tags:
+ - launch-week
+ - algolia
+date: '2025-07-17T00:00:00'
+toc_depth: 2
+author: prashant
+image: launch-week-15/day-4-algolia-connector/og.png
+thumb: launch-week-15/day-4-algolia-connector/thumb.png
+launchweek: '15'
+---
+
+Today, Algolia is launching a new Supabase Connector, making it easier than ever to index your Postgres data and power world-class search experiences without writing a single line of code.
+
+
+
+
+
+With just a few clicks, you can connect your Supabase database to Algolia, select the tables you want to sync, and configure how often the data updates. Algolia handles the rest. You get a fast, reliable, scalable search index, and your team gets to focus on building.
+
+## Partners Integrating with Supabase
+
+Supabase is more than a backend. It is a growing ecosystem of tools that work well together so developers can build faster, scale more easily, and stay focused on their product.
+
+Partners like Algolia bring best-in-class functionality (in Algolia’s case, fast and flexible search) directly into the Supabase workflow. For developers, that means fewer workarounds, no glue code, and a smoother path from idea to production.
+
+For partners, integrating with Supabase means more than technical compatibility. It means product visibility to tens of thousands of active projects. Supabase regularly features integrations in our docs, Launch Weeks, blog, and community programs. Developers discover and adopt your product in the context where they’re already building.
+
+Read on to see how the Algolia Connector for Supabase works.
+
+## How to use the Algolia Connector for Supabase
+
+To get started with Algolia’s connector, prepare the data in your Supabase database, create Supabase as a source in Algolia’s dashboard, set up your Algolia index, and configure your sync job. Here’s how to [get started](https://www.algolia.com/doc/guides/sending-and-managing-data/send-and-update-your-data/connectors/supabase) in just a few minutes.
+
+### 1. Prepare your data in Supabase
+
+Before you connect to Algolia, you will want to ensure all the fields you want to make searchable are in one place. If the fields you want to index live in more than one table, you can stitch them together in a [Postgres View](/docs/guides/graphql/views), allowing Algolia’s connector to get all the data you want to index.
+
+For example, imagine you’re creating an app that makes it easy to find a movie to watch. You want to search across movie titles, genres, ratings, and actors. However, movies and actors are in two separate tables. You can create a view (e.g., `movies_view`) that combines the columns you need:
+
+```sql
+create view movies_view as
+ select
+ m.id as objectID, -- Algolia’s unique key
+ m.title,
+ array_agg(distinct c.actor_name) as actor_name,
+ m.genre,
+ m.rating,
+ m.vote_count
+ from
+ movies as m
+ left join movie_cast as c on c.movie_id = m.id
+ group by m.id, m.title, m.rating, m.vote_count;
+```
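+
+As an optional sanity check (not part of Algolia’s guide), you can query the view in the SQL editor before configuring the sync to confirm each row exposes the columns you plan to index:
+
+```sql
+-- spot-check the view that will feed the Algolia index
+select objectID, title, genre, rating, actor_name
+from movies_view
+order by vote_count desc
+limit 5;
+```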
+
+Later in the Algolia dashboard, you will be able to pick exactly which columns you want to index.
+
+### 2. Go to Algolia dashboard
+
+1. In Algolia, go to [Data Sources → Connectors](https://dashboard.algolia.com/connectors)
+2. Find "Supabase" in the list and click [Connect](https://dashboard.algolia.com/connectors/supabase/create)
+
+### 3. Configure your data source
+
+First, you will need to fill in your Supabase connection info. From the Supabase dashboard:
+
+1. Click the [Connect](/dashboard/project/_?showConnect=true) button at the top of the dashboard
+2. Scroll down to **Connection Info → Transaction Pooler** and copy **host**, **port**, **database name**, and **username**
+3. Paste the database credentials into the Algolia setup screen
+4. Enter your Supabase database **password**
+5. Select your **schema** (usually `public`)
+6. Give your source a name like `supabase_movies`
+7. Algolia will check the connection and confirm your credentials
+
+### 4. Configure your destination
+
+Once you create Supabase as a data source, you'll need to tell Algolia where to index your data.
+
+1. Select an existing Algolia index or create a new one (e.g. `supabase_movies_index`)
+2. Add Index Credentials to this destination by clicking **Create one for me**
+3. Click **Create destination**
+
+### 5. Configure your task and run your sync job
+
+1. Choose how often you want it to sync your data (e.g. every 6 hours)
+2. Pick whether to do full syncs or partial updates
+3. Select the table or view you want to index. We recommend selecting only one table or view for each index
+4. Choose your [objectID](https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/in-depth/what-is-in-a-record/#unique-record-identifier) (usually your primary key)
+
+Once configured, create the task. Algolia will start syncing records from Supabase into your search index (in the YouTube demo above, 8,800+ movie records were synced in under a minute).
+
+You can now instantly search your Supabase data using Algolia's lightning-fast API.
+
+## No more data pipelines. Just fast search.
+
+With the Algolia + Supabase connector, you don’t need to build or maintain custom data pipelines. With Algolia, you don’t need to worry about scaling your own search infrastructure. With Algolia’s API clients, you just connect and go.
+
+## Getting Started
+
+1. [Supabase](/dashboard)
+2. [Algolia](https://dashboard.algolia.com/users/sign_up)
diff --git a/apps/www/_blog/2025-07-17-new-observability-features-in-supabase.mdx b/apps/www/_blog/2025-07-17-new-observability-features-in-supabase.mdx
index 32baea632f46a..8c9d4b34898ae 100644
--- a/apps/www/_blog/2025-07-17-new-observability-features-in-supabase.mdx
+++ b/apps/www/_blog/2025-07-17-new-observability-features-in-supabase.mdx
@@ -12,16 +12,22 @@ toc_depth: 3
author: jonny,saxon_fletcher,jordi,fsansalvadore
image: launch-week-15/day-4-o11y-day/og.jpg
thumb: launch-week-15/day-4-o11y-day/thumb.png
-launchweek: 15
+launchweek: '15'
---
We are starting to add OpenTelemetry support to [all](https://github.com/supabase/storage/pull/494) [our](https://github.com/supabase/auth/pull/679) [core](https://github.com/supabase/edge-runtime/pull/554) [products](https://github.com/supabase/realtime/commit/c9683f3f5f94bd2e37494f02c1f4415551e96e5b) and [our Telemetry server](https://github.com/Logflare/logflare/pulls?q=is%3Apr+otel+sort%3Acreated-asc). OpenTelemetry (OTel) standardizes logs, metrics, and traces in a vendor-agnostic format, so you can ingest data into tools like Datadog, Honeycomb, or any monitoring solution you already use. While you'll still have the freedom to bring your own observability stack, we're preparing to surface this data natively in the Supabase dashboard.
-Today we are launching
+Today, we are announcing:
-- Preview of our new logging Interface
+- New Logging Interface (coming soon!)
- Advanced Product Reports
-- Supabase AI Assistant with debugging capabilities
+- Supabase AI Assistant with debugging capabilities (coming soon!)
+
+
+
+If you would like early access to the new Logging Interface and Supabase AI Assistant with debugging capabilities, please sign up [here](https://forms.supabase.com/unified-logs-signup).
+
+
These updates mark the first step toward unified, end-to-end observability. You won't get the full OTel visualization just yet, but with these foundations in place, you'll soon be able to trace, analyze errors and performance issues, and troubleshoot your entire stack without leaving Supabase.
@@ -37,9 +43,9 @@ These updates mark the first step toward unified, end-to-end observability. You
## New logging Interface
-Supabase is a collection of seamlessly integrated services. Storage talks to Postgres via the dedicated connection pooler. Edge Functions can talk to Auth and Realtime. If storage uploads fail, you must determine whether the problem lies with the storage server, the dedicated connection pooler, or the database. Until now, pinpointing the root cause meant jumping between multiple log streams.
+Supabase is a collection of seamlessly integrated services. Storage talks to Postgres via the dedicated connection pooler. Edge Functions can talk to Auth and Realtime. If Storage uploads fail, you must determine whether the problem lies with the Storage server, the dedicated connection pooler, or the database. Previously, pinpointing the root cause meant jumping between multiple log streams.
-Starting today, there is one interleaved stream of logs across all services. You can trace a single request across the entire Supabase stack. No more jumping between tabs to diagnose errors.
+Now, there is one interleaved stream of logs across all services. You can trace a single request across the entire Supabase stack. No more jumping between tabs to diagnose errors.
Thu, 17 Jul 2025 00:00:00 -0700
+ https://supabase.com/blog/algolia-connector-for-supabase
+ Algolia Connector for Supabase
+ https://supabase.com/blog/algolia-connector-for-supabase
+ Bring lightning-fast search to your Supabase apps, with no code required.
+ Thu, 17 Jul 2025 00:00:00 -0700
+
+https://supabase.com/blog/new-observability-features-in-supabaseNew Observability Features in Supabase
https://supabase.com/blog/new-observability-features-in-supabase
@@ -245,20 +252,6 @@
Technical deep dive into the new DBOS integration for SupabaseTue, 10 Dec 2024 00:00:00 -0700
-
- https://supabase.com/blog/hack-the-base
- Hack the Base! with Supabase
- https://supabase.com/blog/hack-the-base
- Play cool games, win cool prizes.
- Fri, 06 Dec 2024 00:00:00 -0700
-
-
- https://supabase.com/blog/launch-week-13-top-10
- Top 10 Launches of Launch Week 13
- https://supabase.com/blog/launch-week-13-top-10
- Highlights from Launch Week 13
- Fri, 06 Dec 2024 00:00:00 -0700
-https://supabase.com/blog/database-build-v2database.build v2: Bring-your-own-LLM
@@ -273,6 +266,20 @@
Effortlessly Clone Data into a New Supabase ProjectFri, 06 Dec 2024 00:00:00 -0700
+
+ https://supabase.com/blog/hack-the-base
+ Hack the Base! with Supabase
+ https://supabase.com/blog/hack-the-base
+ Play cool games, win cool prizes.
+ Fri, 06 Dec 2024 00:00:00 -0700
+
+
+ https://supabase.com/blog/launch-week-13-top-10
+ Top 10 Launches of Launch Week 13
+ https://supabase.com/blog/launch-week-13-top-10
+ Highlights from Launch Week 13
+ Fri, 06 Dec 2024 00:00:00 -0700
+https://supabase.com/blog/supabase-queuesSupabase Queues