diff --git a/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx b/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx new file mode 100644 index 0000000000000..f666c533d6816 --- /dev/null +++ b/apps/docs/content/troubleshooting/how-to-bypass-cooldown-period.mdx @@ -0,0 +1,23 @@ +--- +title = "How to bypass cooldown period" +topics = [ +"platform" +] +keywords = ["cooldown", "disk resize"] +--- + +This cooldown period isn't a Supabase limitation. It's rooted in how Amazon EBS (the underlying storage service for our databases) manages volume modifications. After modifying a volume (e.g. increasing its size, or changing its type or IOPS), AWS enforces a mandatory 6-hour cooldown before allowing another modification on the same volume. This is to ensure data integrity and stability of the volume under load. + +From the [**AWS docs**](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyVolume.html): + +> “After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available state before you can modify the same volume. This is sometimes referred to as a cooldown period.” + +There are a few options to work around the cooldown, depending on the state of your database: + +1. **Restore to a new project**: This spins up a new instance with a new disk, bypassing the cooldown entirely. It’s a good option if you're okay with moving to a new project and updating your app's references to it. [**Docs: restoring to a new project**](/docs/guides/platform/backups#restore-to-a-new-project). +2. **pg_upgrade**: Our [**pg_upgrade**](/docs/guides/platform/upgrading) implementation migrates your data to a new disk, which skips the cooldown. The main requirement here is that the database must be operational - it can't run if your database is in a degraded or inaccessible state. +3. **Pause and Restore**: This also migrates to a new disk but is only available for projects on the Free plan. If you're not on the Free plan, you'd need to [**transfer your project to an organization on the Free plan**](/docs/guides/platform/project-transfer) first. + +If the database is down or locked in a bad state (e.g. corrupted or stuck during a resize), the only path forward is to wait until the cooldown expires and the disk resize job completes in the queue. + +More on this in our docs: [**https://supabase.com/docs/guides/platform/database-size#disk-size**](/docs/guides/platform/database-size#disk-size). diff --git a/apps/studio/components/interfaces/Integrations/Queues/QueuesSettings.tsx b/apps/studio/components/interfaces/Integrations/Queues/QueuesSettings.tsx index eefac54699b47..a50b127c2d59c 100644 --- a/apps/studio/components/interfaces/Integrations/Queues/QueuesSettings.tsx +++ b/apps/studio/components/interfaces/Integrations/Queues/QueuesSettings.tsx @@ -19,6 +19,7 @@ import { QUEUES_SCHEMA, useDatabaseQueueToggleExposeMutation, } from 'data/database-queues/database-queues-toggle-postgrest-mutation' +import { useDatabaseQueuesVersionQuery } from 'data/database-queues/database-queues-version-query' import { useTableUpdateMutation } from 'data/tables/table-update-mutation' import { useTablesQuery } from 'data/tables/tables-query' import { useAsyncCheckPermissions } from 'hooks/misc/useCheckPermissions' @@ -78,6 +79,11 @@ export const QueuesSettings = () => { }) const schemas = config?.db_schema.replace(/ /g, '').split(',') ??
[] + const { data: pgmqVersion } = useDatabaseQueuesVersionQuery({ + projectRef: project?.ref, + connectionString: project?.connectionString, + }) + const { mutateAsync: updateTable } = useTableUpdateMutation() const { mutate: updatePostgrestConfig } = useProjectPostgrestConfigUpdateMutation({ @@ -161,12 +167,16 @@ export const QueuesSettings = () => { `Failed to toggle queue exposure via PostgREST: Unable to retrieve PostgREST configuration (${configError.message})` ) } + if (!pgmqVersion) { + return toast.error('Unable to retrieve PGMQ version. Please try again later.') + } setIsToggling(true) toggleExposeQueuePostgrest({ projectRef: project.ref, connectionString: project.connectionString, enable: values.enable, + pgmqVersion, }) } diff --git a/apps/studio/components/interfaces/Settings/Integrations/GithubIntegration/GitHubIntegrationConnectionForm.tsx b/apps/studio/components/interfaces/Settings/Integrations/GithubIntegration/GitHubIntegrationConnectionForm.tsx index 152da94ad04af..252894e8bbec0 100644 --- a/apps/studio/components/interfaces/Settings/Integrations/GithubIntegration/GitHubIntegrationConnectionForm.tsx +++ b/apps/studio/components/interfaces/Settings/Integrations/GithubIntegration/GitHubIntegrationConnectionForm.tsx @@ -464,7 +464,12 @@ const GitHubIntegrationConnectionForm = ({ - + diff --git a/apps/studio/data/database-queues/constants.ts b/apps/studio/data/database-queues/constants.ts new file mode 100644 index 0000000000000..c87951ea51daa --- /dev/null +++ b/apps/studio/data/database-queues/constants.ts @@ -0,0 +1 @@ +export const PGMQ_EXTENSION_NAME = 'pgmq' as const diff --git a/apps/studio/data/database-queues/database-queues-toggle-postgrest-mutation.ts b/apps/studio/data/database-queues/database-queues-toggle-postgrest-mutation.ts index 4e1e14566e0cd..02567e13c47bc 100644 --- a/apps/studio/data/database-queues/database-queues-toggle-postgrest-mutation.ts +++ b/apps/studio/data/database-queues/database-queues-toggle-postgrest-mutation.ts @@ -4,191 +4,207 @@ import { toast } from 'sonner' import { databaseKeys } from 'data/database/keys' import { executeSql } from 'data/sql/execute-sql-query' +import { isGreaterThanOrEqual } from 'lib/semver' import type { ResponseError } from 'types' import { databaseQueuesKeys } from './keys' export type DatabaseQueueExposePostgrestVariables = { projectRef: string - connectionString?: string | null enable: boolean + pgmqVersion: string + + connectionString?: string | null } +const CONDITIONAL_READ_SIGNATURE_PGMQ_VERSION = '1.5.0' export const QUEUES_SCHEMA = 'pgmq_public' -const EXPOSE_QUEUES_TO_POSTGREST_SQL = minify(/* SQL */ ` -create schema if not exists ${QUEUES_SCHEMA}; -grant usage on schema ${QUEUES_SCHEMA} to postgres, anon, authenticated, service_role; - -create or replace function ${QUEUES_SCHEMA}.pop( - queue_name text -) - returns setof pgmq.message_record - language plpgsql - set search_path = '' -as $$ -begin - return query - select * - from pgmq.pop( - queue_name := queue_name - ); -end; -$$; - -comment on function ${QUEUES_SCHEMA}.pop(queue_name text) is 'Retrieves and locks the next message from the specified queue.'; - - -create or replace function ${QUEUES_SCHEMA}.send( - queue_name text, - message jsonb, - sleep_seconds integer default 0 -- renamed from 'delay' -) - returns setof bigint - language plpgsql - set search_path = '' -as $$ -begin - return query - select * - from pgmq.send( - queue_name := queue_name, - msg := message, - delay := sleep_seconds - ); -end; -$$; - -comment on function 
${QUEUES_SCHEMA}.send(queue_name text, message jsonb, sleep_seconds integer) is 'Sends a message to the specified queue, optionally delaying its availability by a number of seconds.'; - - -create or replace function ${QUEUES_SCHEMA}.send_batch( - queue_name text, - messages jsonb[], - sleep_seconds integer default 0 -- renamed from 'delay' -) - returns setof bigint - language plpgsql - set search_path = '' -as $$ -begin - return query - select * - from pgmq.send_batch( - queue_name := queue_name, - msgs := messages, - delay := sleep_seconds - ); -end; -$$; - -comment on function ${QUEUES_SCHEMA}.send_batch(queue_name text, messages jsonb[], sleep_seconds integer) is 'Sends a batch of messages to the specified queue, optionally delaying their availability by a number of seconds.'; - - -create or replace function ${QUEUES_SCHEMA}.archive( - queue_name text, - message_id bigint -) - returns boolean - language plpgsql - set search_path = '' -as $$ -begin - return - pgmq.archive( - queue_name := queue_name, - msg_id := message_id - ); -end; -$$; - -comment on function ${QUEUES_SCHEMA}.archive(queue_name text, message_id bigint) is 'Archives a message by moving it from the queue to a permanent archive.'; - - -create or replace function ${QUEUES_SCHEMA}.delete( - queue_name text, - message_id bigint -) - returns boolean - language plpgsql - set search_path = '' -as $$ -begin - return - pgmq.delete( - queue_name := queue_name, - msg_id := message_id - ); -end; -$$; - -comment on function ${QUEUES_SCHEMA}.delete(queue_name text, message_id bigint) is 'Permanently deletes a message from the specified queue.'; - -create or replace function ${QUEUES_SCHEMA}.read( - queue_name text, - sleep_seconds integer, - n integer -) - returns setof pgmq.message_record - language plpgsql - set search_path = '' -as $$ -begin - return query - select * - from pgmq.read( - queue_name := queue_name, - vt := sleep_seconds, - qty := n - ); -end; -$$; - -comment on function ${QUEUES_SCHEMA}.read(queue_name text, sleep_seconds integer, n integer) is 'Reads up to "n" messages from the specified queue with an optional "sleep_seconds" (visibility timeout).'; - --- Grant execute permissions on wrapper functions to roles -grant execute on function ${QUEUES_SCHEMA}.pop(text) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.pop(text) to postgres, service_role, anon, authenticated; - -grant execute on function ${QUEUES_SCHEMA}.send(text, jsonb, integer) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.send(text, jsonb, integer) to postgres, service_role, anon, authenticated; - -grant execute on function ${QUEUES_SCHEMA}.send_batch(text, jsonb[], integer) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.send_batch(text, jsonb[], integer) to postgres, service_role, anon, authenticated; - -grant execute on function ${QUEUES_SCHEMA}.archive(text, bigint) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.archive(text, bigint) to postgres, service_role, anon, authenticated; - -grant execute on function ${QUEUES_SCHEMA}.delete(text, bigint) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.delete(text, bigint) to postgres, service_role, anon, authenticated; - -grant execute on function ${QUEUES_SCHEMA}.read(text, integer, integer) to postgres, service_role, anon, authenticated; -grant execute on function pgmq.read(text, integer, integer) to postgres, service_role, anon, authenticated; - 
--- For the service role, we want full access --- Grant permissions on existing tables -grant all privileges on all tables in schema pgmq to postgres, service_role; - --- Ensure service_role has permissions on future tables -alter default privileges in schema pgmq grant all privileges on tables to postgres, service_role; - -grant usage on schema pgmq to postgres, anon, authenticated, service_role; - - -/* - Grant access to sequences to API roles by default. Existing table permissions - continue to enforce insert restrictions. This is necessary to accommodate the - on-backup hook that rebuild queue table primary keys to avoid a pg_dump segfault. - This can be removed once logical backups are completely retired. -*/ -grant usage, select, update -on all sequences in schema pgmq -to anon, authenticated, service_role; - -alter default privileges in schema pgmq -grant usage, select, update -on sequences -to anon, authenticated, service_role; +const getExposeQueuesSQL = (pgmqVersion: string) => { + const conditionalJsonb = isGreaterThanOrEqual( + pgmqVersion, + CONDITIONAL_READ_SIGNATURE_PGMQ_VERSION + ) + ? `, conditional := '{}'::jsonb` + : '' + const jsonBArg = isGreaterThanOrEqual(pgmqVersion, CONDITIONAL_READ_SIGNATURE_PGMQ_VERSION) + ? `, jsonb` + : '' + + return minify(/* SQL */ ` + create schema if not exists ${QUEUES_SCHEMA}; + grant usage on schema ${QUEUES_SCHEMA} to postgres, anon, authenticated, service_role; + + create or replace function ${QUEUES_SCHEMA}.pop( + queue_name text + ) + returns setof pgmq.message_record + language plpgsql + set search_path = '' + as $$ + begin + return query + select * + from pgmq.pop( + queue_name := queue_name + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.pop(queue_name text) is 'Retrieves and locks the next message from the specified queue.'; + + + create or replace function ${QUEUES_SCHEMA}.send( + queue_name text, + message jsonb, + sleep_seconds integer default 0 -- renamed from 'delay' + ) + returns setof bigint + language plpgsql + set search_path = '' + as $$ + begin + return query + select * + from pgmq.send( + queue_name := queue_name, + msg := message, + delay := sleep_seconds + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.send(queue_name text, message jsonb, sleep_seconds integer) is 'Sends a message to the specified queue, optionally delaying its availability by a number of seconds.'; + + + create or replace function ${QUEUES_SCHEMA}.send_batch( + queue_name text, + messages jsonb[], + sleep_seconds integer default 0 -- renamed from 'delay' + ) + returns setof bigint + language plpgsql + set search_path = '' + as $$ + begin + return query + select * + from pgmq.send_batch( + queue_name := queue_name, + msgs := messages, + delay := sleep_seconds + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.send_batch(queue_name text, messages jsonb[], sleep_seconds integer) is 'Sends a batch of messages to the specified queue, optionally delaying their availability by a number of seconds.'; + + + create or replace function ${QUEUES_SCHEMA}.archive( + queue_name text, + message_id bigint + ) + returns boolean + language plpgsql + set search_path = '' + as $$ + begin + return + pgmq.archive( + queue_name := queue_name, + msg_id := message_id + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.archive(queue_name text, message_id bigint) is 'Archives a message by moving it from the queue to a permanent archive.'; + + + create or replace function ${QUEUES_SCHEMA}.delete( + queue_name text, + message_id bigint + ) + 
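-- thin wrapper: delegates to pgmq.delete with search_path pinned to '', so all references stay schema-qualified +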
returns boolean + language plpgsql + set search_path = '' + as $$ + begin + return + pgmq.delete( + queue_name := queue_name, + msg_id := message_id + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.delete(queue_name text, message_id bigint) is 'Permanently deletes a message from the specified queue.'; + + create or replace function ${QUEUES_SCHEMA}.read( + queue_name text, + sleep_seconds integer, + n integer + ) + returns setof pgmq.message_record + language plpgsql + set search_path = '' + as $$ + begin + return query + select * + from pgmq.read( + queue_name := queue_name, + vt := sleep_seconds, + qty := n ${conditionalJsonb} + ); + end; + $$; + + comment on function ${QUEUES_SCHEMA}.read(queue_name text, sleep_seconds integer, n integer) is 'Reads up to "n" messages from the specified queue with an optional "sleep_seconds" (visibility timeout).'; + + -- Grant execute permissions on wrapper functions to roles + grant execute on function ${QUEUES_SCHEMA}.pop(text) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.pop(text) to postgres, service_role, anon, authenticated; + + grant execute on function ${QUEUES_SCHEMA}.send(text, jsonb, integer) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.send(text, jsonb, integer) to postgres, service_role, anon, authenticated; + + grant execute on function ${QUEUES_SCHEMA}.send_batch(text, jsonb[], integer) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.send_batch(text, jsonb[], integer) to postgres, service_role, anon, authenticated; + + grant execute on function ${QUEUES_SCHEMA}.archive(text, bigint) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.archive(text, bigint) to postgres, service_role, anon, authenticated; + + grant execute on function ${QUEUES_SCHEMA}.delete(text, bigint) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.delete(text, bigint) to postgres, service_role, anon, authenticated; + + grant execute on function ${QUEUES_SCHEMA}.read(text, integer, integer) to postgres, service_role, anon, authenticated; + grant execute on function pgmq.read(text, integer, integer ${jsonBArg}) to postgres, service_role, anon, authenticated; + + -- For the service role, we want full access + -- Grant permissions on existing tables + grant all privileges on all tables in schema pgmq to postgres, service_role; + + -- Ensure service_role has permissions on future tables + alter default privileges in schema pgmq grant all privileges on tables to postgres, service_role; + + grant usage on schema pgmq to postgres, anon, authenticated, service_role; + + + /* + Grant access to sequences to API roles by default. Existing table permissions + continue to enforce insert restrictions. This is necessary to accommodate the + on-backup hook that rebuilds queue table primary keys to avoid a pg_dump segfault. + This can be removed once logical backups are completely retired. 
+ */ + grant usage, select, update + on all sequences in schema pgmq + to anon, authenticated, service_role; + + alter default privileges in schema pgmq + grant usage, select, update + on sequences + to anon, authenticated, service_role; `) +} const HIDE_QUEUES_FROM_POSTGREST_SQL = minify(/* SQL */ ` drop function if exists @@ -219,10 +235,11 @@ const HIDE_QUEUES_FROM_POSTGREST_SQL = minify(/* SQL */ ` export async function toggleQueuesExposurePostgrest({ projectRef, + pgmqVersion, connectionString, enable, }: DatabaseQueueExposePostgrestVariables) { - const sql = enable ? EXPOSE_QUEUES_TO_POSTGREST_SQL : HIDE_QUEUES_FROM_POSTGREST_SQL + const sql = enable ? getExposeQueuesSQL(pgmqVersion) : HIDE_QUEUES_FROM_POSTGREST_SQL const { result } = await executeSql({ projectRef, diff --git a/apps/studio/data/database-queues/database-queues-version-query.ts b/apps/studio/data/database-queues/database-queues-version-query.ts new file mode 100644 index 0000000000000..013b358782492 --- /dev/null +++ b/apps/studio/data/database-queues/database-queues-version-query.ts @@ -0,0 +1,30 @@ +import { useDatabaseExtensionsQuery } from 'data/database-extensions/database-extensions-query' +import { ResponseError } from 'types' +import { PGMQ_EXTENSION_NAME } from './constants' + +export type DatabaseQueuesVersionVariables = { + projectRef?: string + connectionString?: string | null +} + +export type DatabaseQueueVersionData = string | null +export type DatabaseQueueVersionError = ResponseError + +export const useDatabaseQueuesVersionQuery = ( + { projectRef, connectionString }: DatabaseQueuesVersionVariables, + { enabled = true }: { enabled?: boolean } = {} +) => { + return useDatabaseExtensionsQuery( + { projectRef, connectionString }, + { + enabled, + select: (extensions) => { + const pgmqExtension = extensions.find((ext) => ext.name === PGMQ_EXTENSION_NAME) + + if (!pgmqExtension?.installed_version) return null + + return pgmqExtension.installed_version + }, + } + ) +} diff --git a/apps/studio/data/table-rows/table-rows-query.ts b/apps/studio/data/table-rows/table-rows-query.ts index fef01f7d81a90..d6afca2f23c5e 100644 --- a/apps/studio/data/table-rows/table-rows-query.ts +++ b/apps/studio/data/table-rows/table-rows-query.ts @@ -20,6 +20,7 @@ import { isRoleImpersonationEnabled } from 'state/role-impersonation-state' import { ExecuteSqlError, executeSql } from '../sql/execute-sql-query' import { tableRowKeys } from './keys' import { formatFilterValue } from './utils' +import { ResponseError } from 'types' export interface GetTableRowsArgs { table?: SupaTable @@ -42,6 +43,27 @@ const getDefaultOrderByColumns = (table: SupaTable) => { } } +function getErrorCode(error: any): number | undefined { + // Our custom ResponseError's use 'code' instead of 'status' + if (error instanceof ResponseError) { + return error.code + } + return error.status +} + +function getRetryAfter(error: any): number | undefined { + if (error instanceof ResponseError) { + return error.retryAfter + } + + const headerRetry = error.headers?.get('retry-after') + if (headerRetry) { + return parseInt(headerRetry) + } + + return undefined +} + async function sleep(ms: number) { return new Promise((resolve) => setTimeout(resolve, ms)) } @@ -54,12 +76,13 @@ export async function executeWithRetry( for (let attempt = 0; attempt <= maxRetries; attempt++) { try { return await fn() - } catch (error: any) { - // Our custom ResponseError's use 'code' instead of 'status' - if ((error?.status ?? 
error?.code) === 429 && attempt < maxRetries) { + } catch (error: unknown) { + const errorCode = getErrorCode(error) + if (errorCode === 429 && attempt < maxRetries) { // Get retry delay from headers or use exponential backoff (1s, then 2s, then 4s) - const retryAfter = error.headers?.get('retry-after') - const delayMs = retryAfter ? parseInt(retryAfter) * 1000 : baseDelay * Math.pow(2, attempt) + const retryAfter = getRetryAfter(error) + const delayMs = retryAfter ? retryAfter * 1000 : baseDelay * Math.pow(2, attempt) + + await sleep(delayMs) continue } diff --git a/apps/studio/lib/api/self-hosted/lints.ts b/apps/studio/lib/api/self-hosted/lints.ts index 291d8f512c8eb..1add108301dff 100644 --- a/apps/studio/lib/api/self-hosted/lints.ts +++ b/apps/studio/lib/api/self-hosted/lints.ts @@ -1,28 +1,41 @@ +import { paths } from 'api-types' import { DOCS_URL } from 'lib/constants' import { executeQuery } from './query' -import { paths } from 'api-types' -import { WrappedResult } from 'lib/api/self-hosted/types' interface GetLintsOptions { headers?: HeadersInit + exposedSchemas?: string } -export async function getLints({ headers }: GetLintsOptions) { - return await executeQuery({ query: enrichQuery(LINT_SQL), headers }) +export async function getLints({ headers, exposedSchemas }: GetLintsOptions) { + return await executeQuery({ + query: enrichLintsQuery(LINT_SQL, exposedSchemas), + headers, + }) } export type ResponseData = paths['/platform/projects/{ref}/run-lints']['get']['responses']['200']['content']['application/json'] -export const enrichQuery = (query: string) => ` +export const enrichLintsQuery = (query: string, exposedSchemas?: string) => { + const literalSchemas = exposedSchemas ? `'${exposedSchemas}'` : "''" + return ` +set pg_stat_statements.track = none; +set local pgrst.db_schemas = ${literalSchemas}; -- source: dashboard -- user: ${'self host'} -- date: ${new Date().toISOString()} ${query} ` +} -// Pulled from https://github.com/supabase/splinter/blob/main/splinter.sql +/** + * Pulled from https://github.com/supabase/splinter/blob/main/splinter.sql + * Things to do after copy pasting from splinter.sql + * - Replace all \` with \\\` to escape the tick character in the template literal + * - Replace docs url with DOCS_URL (${DOCS_URL}) + */ const LINT_SQL = /* SQL */ `set local search_path = ''; ( @@ -83,14 +96,14 @@ from foreign_keys fk left join index_ idx on fk.table_oid = idx.table_oid - and fk.col_attnums = idx.col_attnums + and fk.col_attnums = idx.col_attnums[1:array_length(fk.col_attnums, 1)] left join pg_catalog.pg_depend dep on idx.table_oid = dep.objid and dep.deptype = 'e' where idx.index_ is null and fk.schema_name not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null -- exclude tables owned by extensions order by @@ -222,9 +235,9 @@ select
'WARN' as level, 'EXTERNAL' as facing, array['PERFORMANCE'] as categories, - 'Detects if calls to \`auth.()\` in RLS policies are being unnecessarily re-evaluated for each row' as description, + 'Detects if calls to \`current_setting()\` and \`auth.()\` in RLS policies are being unnecessarily re-evaluated for each row' as description, format( - 'Table \`%s.%s\` has a row level security policy \`%s\` that re-evaluates an auth.() for each row. This produces suboptimal query performance at scale. Resolve the issue by replacing \`auth.()\` with \`(select auth.())\`. See [docs](${DOCS_URL}/guides/database/postgres/row-level-security#call-functions-with-select) for more info.', + 'Table \`%s.%s\` has a row level security policy \`%s\` that re-evaluates current_setting() or auth.() for each row. This produces suboptimal query performance at scale. Resolve the issue by replacing \`auth.()\` with \`(select auth.())\`. See [docs](${DOCS_URL}/guides/database/postgres/row-level-security#call-functions-with-select) for more info.', schema_name, table_name, policy_name @@ -242,7 +255,7 @@ where is_rls_active -- NOTE: does not include realtime in support of monitoring policies on realtime.messages and schema_name not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and ( -- Example: auth.uid() @@ -262,6 +275,10 @@ where qual like '%auth.email()%' and lower(qual) not like '%select auth.email()%' ) + or ( + qual like '%current\\_setting(%)%' + and lower(qual) not like '%select current\\_setting(%)%' + ) or ( with_check like '%auth.uid()%' and lower(with_check) not like '%select auth.uid()%' ) @@ -278,6 +295,10 @@ where with_check like '%auth.email()%' and lower(with_check) not like '%select auth.email()%' ) + or ( + with_check like '%current\\_setting(%)%' + and lower(with_check) not like '%select current\\_setting(%)%' + ) )) union all ( @@ -316,7 +337,7 @@ from where pgc.relkind = 'r' -- regular tables and pgns.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null -- exclude tables owned by extensions group by @@ -366,7 +387,7 @@ where and not pi.indisprimary and dep.objid is null --
exclude tables owned by extensions and psui.schemaname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' )) union all ( @@ -428,7 +449,7 @@ where c.relkind = 'r' -- regular tables and p.polpermissive -- policy is permissive and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and r.rolname not like 'pg_%' and r.rolname not like 'supabase%admin' @@ -479,7 +500,7 @@ from where c.relkind = 'r' -- regular tables and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) -- RLS is disabled and not c.relrowsecurity @@ -524,7 +545,7 @@ from where c.relkind = 'r' -- regular tables and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 
'vault' ) -- RLS is enabled and c.relrowsecurity @@ -578,7 +599,7 @@ from where c.relkind in ('r', 'm') -- tables and materialized views and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null -- exclude tables owned by extensions group by @@ -629,7 +650,7 @@ where and substring(pg_catalog.version() from 'PostgreSQL ([0-9]+)') >= '15' -- security invoker was added in pg15 and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null -- exclude views owned by extensions and not ( @@ -649,7 +670,7 @@ select 'WARN' as level, 'EXTERNAL' as facing, array['SECURITY'] as categories, - 'Detects functions where the search_path parameter is not set to an empty string.' as description, + 'Detects functions where the search_path parameter is not set.' 
as description, format( 'Function \`%s.%s\` has a role mutable search_path', n.nspname, @@ -676,11 +697,15 @@ from and dep.deptype = 'e' where n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null -- exclude functions owned by extensions - -- Search path not set to '' - and not coalesce(p.proconfig, '{}') && array['search_path=""']) + -- Search path not set + and not exists ( + select 1 + from unnest(coalesce(p.proconfig, '{}')) as config + where config like 'search_path=%' + )) union all ( select @@ -720,7 +745,7 @@ where ) and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' )) union all ( @@ -800,7 +825,7 @@ from policies where schema_name not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and ( -- Example: auth.jwt() -> 'user_metadata' @@ -851,7 +876,7 @@ where ) and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 
'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null) union all @@ -894,7 +919,7 @@ where ) and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) and n.nspname not in ( - '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' + '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' ) and dep.objid is null) union all @@ -939,4 +964,225 @@ from where tn.nspname = 'pg_catalog' and t.typname in ('regcollation', 'regconfig', 'regdictionary', 'regnamespace', 'regoper', 'regoperator', 'regproc', 'regprocedure') - and n.nspname not in ('pg_catalog', 'information_schema', 'pgsodium'))`.trim() + and n.nspname not in ('pg_catalog', 'information_schema', 'pgsodium')) +union all +( +select + 'insecure_queue_exposed_in_api' as name, + 'Insecure Queue Exposed in API' as title, + 'ERROR' as level, + 'EXTERNAL' as facing, + array['SECURITY'] as categories, + 'Detects cases where an insecure Queue is exposed over Data APIs' as description, + format( + 'Table \`%s.%s\` is public, but RLS has not been enabled.', + n.nspname, + c.relname + ) as detail, + '${DOCS_URL}/guides/database/database-linter?lint=0019_insecure_queue_exposed_in_api' as remediation, + jsonb_build_object( + 'schema', n.nspname, + 'name', c.relname, + 'type', 'table' + ) as metadata, + format( + 'rls_disabled_in_public_%s_%s', + n.nspname, + c.relname + ) as cache_key +from + pg_catalog.pg_class c + join pg_catalog.pg_namespace n + on c.relnamespace = n.oid +where + c.relkind in ('r', 'p') -- regular or partitioned tables + and not c.relrowsecurity -- RLS is disabled + and ( + pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') + or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') + ) + and n.nspname = 'pgmq' -- tables in the pgmq schema + and c.relname like 'q_%' -- only queue tables + -- Constant requirements + and 'pgmq_public' = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))) +union all +( +with constants as ( + select current_setting('block_size')::numeric as bs, 23 as hdr, 4 as ma +), + +bloat_info as ( + select + ma, + bs, + schemaname, + tablename, + (datawidth + (hdr + ma - (case when hdr % ma = 0 then ma else hdr % ma end)))::numeric as datahdr, + (maxfracsum * (nullhdr + ma - (case when nullhdr % ma = 0 then ma else nullhdr % ma end))) as nullhdr2 + from ( + select + schemaname, + tablename, + hdr, + ma, + bs, + sum((1 - null_frac) * avg_width) 
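-- per-column average width weighted by its non-null fraction; feeds the expected heap size estimate +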
as datawidth, + max(null_frac) as maxfracsum, + hdr + ( + select 1 + count(*) / 8 + from pg_stats s2 + where + null_frac <> 0 + and s2.schemaname = s.schemaname + and s2.tablename = s.tablename + ) as nullhdr + from pg_stats s, constants + group by 1, 2, 3, 4, 5 + ) as foo +), + +table_bloat as ( + select + schemaname, + tablename, + cc.relpages, + bs, + ceil((cc.reltuples * ((datahdr + ma - + (case when datahdr % ma = 0 then ma else datahdr % ma end)) + nullhdr2 + 4)) / (bs - 20::float)) as otta + from + bloat_info + join pg_class cc + on cc.relname = bloat_info.tablename + join pg_namespace nn + on cc.relnamespace = nn.oid + and nn.nspname = bloat_info.schemaname + and nn.nspname <> 'information_schema' + where + cc.relkind = 'r' + and cc.relam = (select oid from pg_am where amname = 'heap') +), + +bloat_data as ( + select + 'table' as type, + schemaname, + tablename as object_name, + round(case when otta = 0 then 0.0 else table_bloat.relpages / otta::numeric end, 1) as bloat, + case when relpages < otta then 0 else (bs * (table_bloat.relpages - otta)::bigint)::bigint end as raw_waste + from + table_bloat +) + +select + 'table_bloat' as name, + 'Table Bloat' as title, + 'INFO' as level, + 'EXTERNAL' as facing, + array['PERFORMANCE'] as categories, + 'Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.' as description, + format( + 'Table \`%s\`.\`%s\` has excessive bloat', + bloat_data.schemaname, + bloat_data.object_name + ) as detail, + 'Consider running vacuum full (WARNING: incurs downtime) and tweaking autovacuum settings to reduce bloat.' as remediation, + jsonb_build_object( + 'schema', bloat_data.schemaname, + 'name', bloat_data.object_name, + 'type', bloat_data.type + ) as metadata, + format( + 'table_bloat_%s_%s', + bloat_data.schemaname, + bloat_data.object_name + ) as cache_key +from + bloat_data +where + bloat > 70.0 + and raw_waste > (20 * 1024 * 1024) -- filter for waste > 20 MB +order by + schemaname, + object_name) +union all +( +select + 'fkey_to_auth_unique' as name, + 'Foreign Key to Auth Unique Constraint' as title, + 'ERROR' as level, + 'EXTERNAL' as facing, + array['SECURITY'] as categories, + 'Detects user defined foreign keys to unique constraints in the auth schema.' as description, + format( + 'Table \`%s\`.\`%s\` has a foreign key \`%s\` referencing an auth unique constraint', + n.nspname, -- referencing schema + c_rel.relname, -- referencing table + c.conname -- fkey name + ) as detail, + 'Drop the foreign key constraint that references the auth schema.' 
as remediation, + jsonb_build_object( + 'schema', n.nspname, + 'name', c_rel.relname, + 'foreign_key', c.conname + ) as metadata, + format( + 'fkey_to_auth_unique_%s_%s_%s', + n.nspname, -- referencing schema + c_rel.relname, -- referencing table + c.conname + ) as cache_key +from + pg_catalog.pg_constraint c + join pg_catalog.pg_class c_rel + on c.conrelid = c_rel.oid + join pg_catalog.pg_namespace n + on c_rel.relnamespace = n.oid + join pg_catalog.pg_class ref_rel + on c.confrelid = ref_rel.oid + join pg_catalog.pg_namespace cn + on ref_rel.relnamespace = cn.oid + join pg_catalog.pg_index i + on c.conindid = i.indexrelid +where c.contype = 'f' + and cn.nspname = 'auth' + and i.indisunique + and not i.indisprimary) +union all +( +select + 'extension_versions_outdated' as name, + 'Extension Versions Outdated' as title, + 'WARN' as level, + 'EXTERNAL' as facing, + array['SECURITY'] as categories, + 'Detects extensions that are not using the default (recommended) version.' as description, + format( + 'Extension \`%s\` is using version \`%s\` but version \`%s\` is available. Using outdated extension versions may expose the database to security vulnerabilities.', + ext.name, + ext.installed_version, + ext.default_version + ) as detail, + '${DOCS_URL}/guides/database/database-linter?lint=0022_extension_versions_outdated' as remediation, + jsonb_build_object( + 'extension_name', ext.name, + 'installed_version', ext.installed_version, + 'default_version', ext.default_version + ) as metadata, + format( + 'extension_versions_outdated_%s_%s', + ext.name, + ext.installed_version + ) as cache_key +from + pg_catalog.pg_available_extensions ext +join + -- ignore versions not in pg_available_extension_versions + -- e.g. residue of pg_upgrade + pg_catalog.pg_available_extension_versions extv + on extv.name = ext.name and extv.installed +where + ext.installed_version is not null + and ext.default_version is not null + and ext.installed_version != ext.default_version +order by + ext.name)`.trim() diff --git a/apps/studio/lib/semver.test.ts b/apps/studio/lib/semver.test.ts new file mode 100644 index 0000000000000..2b134f8d5ad20 --- /dev/null +++ b/apps/studio/lib/semver.test.ts @@ -0,0 +1,166 @@ +import { describe, it, expect } from 'vitest' +import { + parseSemver, + compareSemver, + isGreaterThan, + isLessThan, + isEqual, + isGreaterThanOrEqual, + isLessThanOrEqual, + isValidSemver, +} from './semver' + +describe('parseSemver', () => { + it('should parse valid semver strings', () => { + expect(parseSemver('1.2.3')).toEqual({ major: 1, minor: 2, patch: 3 }) + expect(parseSemver('0.0.1')).toEqual({ major: 0, minor: 0, patch: 1 }) + expect(parseSemver('10.20.30')).toEqual({ major: 10, minor: 20, patch: 30 }) + }) + + it('should handle strings with extra whitespace', () => { + expect(parseSemver(' 1.2.3 ')).toEqual({ major: 1, minor: 2, patch: 3 }) + expect(parseSemver(' 1.2.3 ')).toEqual({ major: 1, minor: 2, patch: 3 }) + }) + + it('should return null for invalid semver strings', () => { + expect(parseSemver('1.2')).toBeNull() + expect(parseSemver('1.2.3.4')).toBeNull() + expect(parseSemver('1.2.x')).toBeNull() + expect(parseSemver('a.b.c')).toBeNull() + expect(parseSemver('')).toBeNull() + expect(parseSemver('invalid')).toBeNull() + }) + + it('should return null for negative numbers', () => { + expect(parseSemver('-1.2.3')).toBeNull() + expect(parseSemver('1.-2.3')).toBeNull() + expect(parseSemver('1.2.-3')).toBeNull() + }) + + it('should return null for non-string inputs', () => { + 
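// the runtime typeof guard means values smuggled past the compiler with 'as any' still come back null +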
expect(parseSemver(null as any)).toBeNull() + expect(parseSemver(undefined as any)).toBeNull() + expect(parseSemver(123 as any)).toBeNull() + }) +}) + +describe('compareSemver', () => { + it('should return 0 for equal versions', () => { + expect(compareSemver('1.2.3', '1.2.3')).toBe(0) + expect(compareSemver('0.0.0', '0.0.0')).toBe(0) + expect(compareSemver('10.20.30', '10.20.30')).toBe(0) + }) + + it('should return 1 when first version is greater', () => { + expect(compareSemver('2.0.0', '1.0.0')).toBe(1) + expect(compareSemver('1.3.0', '1.2.0')).toBe(1) + expect(compareSemver('1.2.4', '1.2.3')).toBe(1) + expect(compareSemver('2.0.0', '1.9.9')).toBe(1) + }) + + it('should return -1 when first version is less', () => { + expect(compareSemver('1.0.0', '2.0.0')).toBe(-1) + expect(compareSemver('1.2.0', '1.3.0')).toBe(-1) + expect(compareSemver('1.2.3', '1.2.4')).toBe(-1) + expect(compareSemver('1.9.9', '2.0.0')).toBe(-1) + }) + + it('should return null for invalid versions', () => { + expect(compareSemver('1.2.3', 'invalid')).toBeNull() + expect(compareSemver('invalid', '1.2.3')).toBeNull() + expect(compareSemver('1.2', '1.2.3')).toBeNull() + }) + + it('should prioritize major version differences', () => { + expect(compareSemver('2.0.0', '1.99.99')).toBe(1) + expect(compareSemver('1.99.99', '2.0.0')).toBe(-1) + }) + + it('should prioritize minor version differences when major is equal', () => { + expect(compareSemver('1.3.0', '1.2.99')).toBe(1) + expect(compareSemver('1.2.99', '1.3.0')).toBe(-1) + }) +}) + +describe('isGreaterThan', () => { + it('should return true when first version is greater', () => { + expect(isGreaterThan('2.0.0', '1.0.0')).toBe(true) + expect(isGreaterThan('1.3.0', '1.2.0')).toBe(true) + expect(isGreaterThan('1.2.4', '1.2.3')).toBe(true) + }) + + it('should return false when first version is not greater', () => { + expect(isGreaterThan('1.0.0', '2.0.0')).toBe(false) + expect(isGreaterThan('1.2.3', '1.2.3')).toBe(false) + expect(isGreaterThan('1.2.3', 'invalid')).toBe(false) + }) +}) + +describe('isLessThan', () => { + it('should return true when first version is less', () => { + expect(isLessThan('1.0.0', '2.0.0')).toBe(true) + expect(isLessThan('1.2.0', '1.3.0')).toBe(true) + expect(isLessThan('1.2.3', '1.2.4')).toBe(true) + }) + + it('should return false when first version is not less', () => { + expect(isLessThan('2.0.0', '1.0.0')).toBe(false) + expect(isLessThan('1.2.3', '1.2.3')).toBe(false) + expect(isLessThan('1.2.3', 'invalid')).toBe(false) + }) +}) + +describe('isEqual', () => { + it('should return true when versions are equal', () => { + expect(isEqual('1.2.3', '1.2.3')).toBe(true) + expect(isEqual('0.0.0', '0.0.0')).toBe(true) + expect(isEqual('10.20.30', '10.20.30')).toBe(true) + }) + + it('should return false when versions are not equal', () => { + expect(isEqual('1.2.3', '1.2.4')).toBe(false) + expect(isEqual('1.2.3', '2.2.3')).toBe(false) + expect(isEqual('1.2.3', 'invalid')).toBe(false) + }) +}) + +describe('isGreaterThanOrEqual', () => { + it('should return true when first version is greater or equal', () => { + expect(isGreaterThanOrEqual('2.0.0', '1.0.0')).toBe(true) + expect(isGreaterThanOrEqual('1.2.3', '1.2.3')).toBe(true) + expect(isGreaterThanOrEqual('1.2.4', '1.2.3')).toBe(true) + }) + + it('should return false when first version is less', () => { + expect(isGreaterThanOrEqual('1.0.0', '2.0.0')).toBe(false) + expect(isGreaterThanOrEqual('1.2.3', 'invalid')).toBe(false) + }) +}) + +describe('isLessThanOrEqual', () => { + it('should return true 
when first version is less or equal', () => { + expect(isLessThanOrEqual('1.0.0', '2.0.0')).toBe(true) + expect(isLessThanOrEqual('1.2.3', '1.2.3')).toBe(true) + expect(isLessThanOrEqual('1.2.3', '1.2.4')).toBe(true) + }) + + it('should return false when first version is greater', () => { + expect(isLessThanOrEqual('2.0.0', '1.0.0')).toBe(false) + expect(isLessThanOrEqual('1.2.3', 'invalid')).toBe(false) + }) +}) + +describe('isValidSemver', () => { + it('should return true for valid semver strings', () => { + expect(isValidSemver('1.2.3')).toBe(true) + expect(isValidSemver('0.0.0')).toBe(true) + expect(isValidSemver('10.20.30')).toBe(true) + }) + + it('should return false for invalid semver strings', () => { + expect(isValidSemver('1.2')).toBe(false) + expect(isValidSemver('1.2.3.4')).toBe(false) + expect(isValidSemver('invalid')).toBe(false) + expect(isValidSemver('')).toBe(false) + }) +}) diff --git a/apps/studio/lib/semver.ts b/apps/studio/lib/semver.ts new file mode 100644 index 0000000000000..969e8d83ff358 --- /dev/null +++ b/apps/studio/lib/semver.ts @@ -0,0 +1,130 @@ +/** + * Semantic versioning utility for comparing version strings in the format "x.x.x" + */ + +export interface SemverVersion { + major: number + minor: number + patch: number +} + +/** + * Parses a semver string in the format "x.x.x" into its components + * @param version - The version string to parse (e.g., "1.2.3") + * @returns The parsed version components or null if invalid + */ +export function parseSemver(version: string): SemverVersion | null { + if (!version || typeof version !== 'string') { + return null + } + + const parts = version.trim().split('.') + + if (parts.length !== 3) { + return null + } + + const major = parseInt(parts[0], 10) + const minor = parseInt(parts[1], 10) + const patch = parseInt(parts[2], 10) + + if (isNaN(major) || isNaN(minor) || isNaN(patch)) { + return null + } + + if (major < 0 || minor < 0 || patch < 0) { + return null + } + + return { major, minor, patch } +} + +/** + * Compares two semver version strings + * @param a - First version string + * @param b - Second version string + * @returns -1 if a < b, 0 if a === b, 1 if a > b, or null if either version is invalid + */ +export function compareSemver(a: string, b: string): -1 | 0 | 1 | null { + const versionA = parseSemver(a) + const versionB = parseSemver(b) + + if (!versionA || !versionB) { + return null + } + + if (versionA.major !== versionB.major) { + return versionA.major > versionB.major ? 1 : -1 + } + + if (versionA.minor !== versionB.minor) { + return versionA.minor > versionB.minor ? 1 : -1 + } + + if (versionA.patch !== versionB.patch) { + return versionA.patch > versionB.patch ? 
1 : -1 + } + + return 0 +} + +/** + * Checks if version a is greater than version b + * @param a - First version string + * @param b - Second version string + * @returns true if a > b, false otherwise + */ +export function isGreaterThan(a: string, b: string): boolean { + return compareSemver(a, b) === 1 +} + +/** + * Checks if version a is less than version b + * @param a - First version string + * @param b - Second version string + * @returns true if a < b, false otherwise + */ +export function isLessThan(a: string, b: string): boolean { + return compareSemver(a, b) === -1 +} + +/** + * Checks if version a is equal to version b + * @param a - First version string + * @param b - Second version string + * @returns true if a === b, false otherwise + */ +export function isEqual(a: string, b: string): boolean { + return compareSemver(a, b) === 0 +} + +/** + * Checks if version a is greater than or equal to version b + * @param a - First version string + * @param b - Second version string + * @returns true if a >= b, false otherwise + */ +export function isGreaterThanOrEqual(a: string, b: string): boolean { + const result = compareSemver(a, b) + return result === 1 || result === 0 +} + +/** + * Checks if version a is less than or equal to version b + * @param a - First version string + * @param b - Second version string + * @returns true if a <= b, false otherwise + */ +export function isLessThanOrEqual(a: string, b: string): boolean { + const result = compareSemver(a, b) + return result === -1 || result === 0 +} + +/** + * Checks if a version string is valid + * @param version - The version string to validate + * @returns true if the version is valid, false otherwise + */ +export function isValidSemver(version: string): boolean { + return parseSemver(version) !== null +} diff --git a/apps/studio/pages/api/platform/projects/[ref]/run-lints.ts b/apps/studio/pages/api/platform/projects/[ref]/run-lints.ts index b66f919a1e30c..51fe4a29c31ca 100644 --- a/apps/studio/pages/api/platform/projects/[ref]/run-lints.ts +++ b/apps/studio/pages/api/platform/projects/[ref]/run-lints.ts @@ -11,7 +11,19 @@ async function handler(req: NextApiRequest, res: NextApiResponse) { switch (method) { case 'GET': - const { data, error } = await getLints({ headers: constructHeaders(req.headers) }) + /** + * [Joshen] JFYI technically the exposed schemas are being set via docker-compose.yml + * https://github.com/supabase/supabase/blob/master/docker/docker-compose.yml#L183 + * https://github.com/supabase/supabase/blob/474a78721e510301d15ca9dbd41f05ce10fa29e5/docker/.env.example#L55 + * + * But I noticed that the local API route in config/postgrest.ts currently hardcodes db_schema to `public, storage` + * As such, this is just a temporary patch that hardcodes the exposed schemas here; we will need to figure + * out how to get the dashboard to retrieve the values from docker-compose + */ + const { data, error } = await getLints({ + headers: constructHeaders(req.headers), + exposedSchemas: 'public, storage', + }) if (error) { return res.status(400).json(error) diff --git a/packages/ui-patterns/src/form/Layout/FormLayout.tsx b/packages/ui-patterns/src/form/Layout/FormLayout.tsx index a3379cfa368f6..54d41edfe5751 100644 --- a/packages/ui-patterns/src/form/Layout/FormLayout.tsx +++ b/packages/ui-patterns/src/form/Layout/FormLayout.tsx @@ -42,11 +42,11 @@ const ContainerVariants = cva('relative grid gap-10', { false: '', }, layout: { - horizontal: 'flex flex-col gap-2 md:grid @lg:grid-cols-12', + horizontal: 
'flex flex-col gap-2 md:grid md:grid-cols-12', vertical: 'flex flex-col gap-2', flex: 'flex flex-row gap-3', 'flex-row-reverse': - 'flex flex-col-reverse gap-2 @lg:gap-6 @lg:flex-row-reverse @lg:justify-between', + 'flex flex-col-reverse gap-2 md:gap-6 md:flex-row-reverse md:justify-between', }, flex: { true: '', @@ -233,7 +233,7 @@ const FlexContainer = cva('', { }, { layout: 'flex-row-reverse', - className: 'flex flex-col justify-center items-start @lg:items-end shrink-0', + className: 'flex flex-col justify-center items-start md:items-end shrink-0', }, ], }) @@ -359,85 +359,83 @@ export const FormLayout = React.forwardRef< ) return ( -
-
- {flex && ( -
- {props.children} - {layout === 'flex-row-reverse' && renderError} +
+ {flex && ( +
+ {props.children} + {layout === 'flex-row-reverse' && renderError} +
+ )} + {hasLabel || labelOptional || layout === 'horizontal' ? ( + <> +
+ {hasLabel && isReactForm ? ( + + + + ) : ( + + + + )} + {labelOptional && ( + + {labelOptional} + + )} + {flex && ( + <> + {renderDescription} + {layout !== 'flex-row-reverse' && renderError} + + )}
- )} - {hasLabel || labelOptional || layout === 'horizontal' ? ( + + ) : null} + {!flex && ( +
<>
- {hasLabel && isReactForm ? ( - - - - ) : ( - - - - )} - {labelOptional && ( - - {labelOptional} - - )} - {flex && ( - <> - {renderDescription} - {layout !== 'flex-row-reverse' && renderError} - + className={cn( + NonBoxInputContainer({ + nonBoxInput, + // @ts-expect-error + label, + layout, + }) )} + data-formlayout-id={'nonBoxInputContainer'} + > + {props.children}
+ {renderError} + {renderDescription} - ) : null} - {!flex && ( -
- <> -
- {props.children} -
- {renderError} - {renderDescription} - -
- )} -
+
+ )}
) }
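A closing note on the version gate above, since it is easy to misread: the semver helpers fail closed. `compareSemver` returns `null` for anything that is not a full `x.y.z` string, the boolean helpers map that `null` to `false`, and `getExposeQueuesSQL` then falls back to the pre-1.5.0 three-argument `pgmq.read` signature. A minimal sketch of the behavior (version strings are illustrative):

```ts
import { compareSemver, isGreaterThanOrEqual } from 'lib/semver'

// An unparsable version yields null rather than throwing...
compareSemver('1.5', '1.5.0') // null - not a full x.y.z string

// ...and a null comparison answers false, so the gate stays on the old signature
isGreaterThanOrEqual('1.5', '1.5.0') // false

// A parsed version at or past 1.5.0 switches on the conditional read argument
isGreaterThanOrEqual('1.5.1', '1.5.0') // true
```

This is also why `QueuesSettings` refuses to toggle exposure while `pgmqVersion` is unavailable: guessing would emit a grant for a `pgmq.read` overload that may not exist on the instance.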