diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index a07fdfbcd..64d2a919f 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -35,7 +35,8 @@ runs: echo "Extension directory: $(pg_config --sharedir)/extension" echo "Library directory: $(pg_config --pkglibdir)" - # Clone and build plpgsql_check + # Clone and build plpgsql_check (clone to /tmp to avoid workspace conflicts) + cd /tmp git clone https://github.com/okbob/plpgsql_check.git cd plpgsql_check @@ -53,30 +54,77 @@ runs: echo "Extension library files:" ls -la "$(pg_config --pkglibdir)/" | grep plpgsql || echo "No plpgsql_check library found" - # Install the pglpgsql_check extension on macOS (Part 2) - - name: Create extension in database + # Install the pglinter extension on macOS (pgrx-based Rust extension) + - name: Install and compile pglinter + if: runner.os == 'macOS' + shell: bash + run: | + # First, ensure we're using the same PostgreSQL that the action installed + export PATH="$(pg_config --bindir):$PATH" + + # Install cargo-pgrx (version must match pglinter's pgrx dependency) + cargo install cargo-pgrx --version 0.16.1 --locked + + # Determine postgres version for pgrx init + PG_VERSION=$(pg_config --version | grep -oE '[0-9]+' | head -1) + echo "PostgreSQL version: $PG_VERSION" + + # Initialize pgrx for the installed PostgreSQL version + cargo pgrx init --pg${PG_VERSION} $(which pg_config) + + # Clone and build pglinter (clone to /tmp to avoid workspace conflicts) + cd /tmp + git clone https://github.com/pmpetit/pglinter.git + cd pglinter + + # Install using pgrx + cargo pgrx install --pg-config $(which pg_config) --release + + # Verify installation + echo "Extension control files:" + ls -la "$(pg_config --sharedir)/extension/" | grep pglinter || echo "No pglinter found" + + echo "Extension library files:" + ls -la "$(pg_config --pkglibdir)/" | grep pglinter || echo "No pglinter library found" + + 
# Create extensions in database on macOS + - name: Create extensions in database if: runner.os == 'macOS' shell: bash env: PGSERVICE: ${{ steps.postgres.outputs.service-name }} run: | psql -c "CREATE EXTENSION plpgsql_check;" + psql -c "CREATE EXTENSION pglinter;" # Verify installation - psql -c "SELECT extname, extversion FROM pg_extension WHERE extname = 'plpgsql_check';" + psql -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" + + # For Linux, use custom Docker image with plpgsql_check and pglinter + - name: Set up Docker Buildx + if: runner.os == 'Linux' + uses: docker/setup-buildx-action@v3 - # For Linux, use custom Docker image with plpgsql_check - - name: Build and start PostgreSQL with plpgsql_check + - name: Build PostgreSQL image with cache + if: runner.os == 'Linux' + uses: docker/build-push-action@v5 + with: + context: . + load: true + tags: postgres-language-server-dev:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Start PostgreSQL container if: runner.os == 'Linux' shell: bash run: | - docker build -t postgres-plpgsql-check:latest . docker run -d --name postgres \ -e POSTGRES_USER=postgres \ -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_DB=postgres \ -p 5432:5432 \ - postgres-plpgsql-check:latest + postgres-language-server-dev:latest # Wait for postgres to be ready for _ in {1..30}; do if docker exec postgres pg_isready -U postgres; then @@ -85,3 +133,13 @@ runs: sleep 1 done + # Create extensions in postgres database only (NOT template1) + # This avoids polluting test databases - tests that need extensions can create them explicitly + echo "Creating extensions in postgres database..." 
+ docker exec postgres psql -U postgres -c "CREATE SCHEMA IF NOT EXISTS extensions;" + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" + + # Show extension status + docker exec postgres psql -U postgres -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" + diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 45d4f9879..e74a551cc 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -96,15 +96,26 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # we need to use the same database as we do locally for sqlx prepare to output the same hashes - - name: Build and start PostgreSQL with plpgsql_check + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build PostgreSQL image with cache + uses: docker/build-push-action@v5 + with: + context: . + load: true + tags: postgres-language-server-dev:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Start PostgreSQL run: | - docker build -t postgres-plpgsql-check:latest . 
docker run -d --name postgres \ -e POSTGRES_USER=postgres \ -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_DB=postgres \ -p 5432:5432 \ - postgres-plpgsql-check:latest + postgres-language-server-dev:latest # Wait for postgres to be ready for _ in {1..30}; do if docker exec postgres pg_isready -U postgres; then diff --git a/Cargo.lock b/Cargo.lock index 27377ace1..2cb48b9c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2696,6 +2696,7 @@ dependencies = [ "pgls_env", "pgls_fs", "pgls_lsp", + "pgls_test_utils", "pgls_text_edit", "pgls_workspace", "quick-junit", @@ -2703,6 +2704,7 @@ dependencies = [ "rustc-hash 2.1.0", "serde", "serde_json", + "sqlx", "tikv-jemallocator", "tokio", "tracing", @@ -2919,6 +2921,24 @@ dependencies = [ "quote", ] +[[package]] +name = "pgls_pglinter" +version = "0.0.0" +dependencies = [ + "insta", + "pgls_analyse", + "pgls_console", + "pgls_diagnostics", + "pgls_diagnostics_categories", + "pgls_schema_cache", + "pgls_test_utils", + "rustc-hash 2.1.0", + "serde", + "serde_json", + "sqlx", + "ureq", +] + [[package]] name = "pgls_plpgsql_check" version = "0.0.0" @@ -3005,6 +3025,7 @@ dependencies = [ "serde", "serde_json", "sqlx", + "ureq", ] [[package]] @@ -3162,6 +3183,7 @@ dependencies = [ "pgls_query", "pgls_query_ext", "pgls_schema_cache", + "pgls_splinter", "pgls_statement_splitter", "pgls_suppressions", "pgls_test_utils", @@ -5069,6 +5091,8 @@ dependencies = [ "once_cell", "rustls", "rustls-pki-types", + "serde", + "serde_json", "url", "webpki-roots 0.26.11", ] @@ -5617,11 +5641,13 @@ dependencies = [ "pgls_analyser", "pgls_diagnostics", "pgls_env", + "pgls_pglinter", "pgls_splinter", "pgls_workspace", "proc-macro2", "pulldown-cmark", "quote", + "regex", "xtask", ] diff --git a/Cargo.toml b/Cargo.toml index 9b09e8ea4..32c3c8773 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ pgls_lexer = { path = "./crates/pgls_lexer", version = "0.0.0" pgls_lexer_codegen = { path = "./crates/pgls_lexer_codegen", version = "0.0.0" } pgls_lsp = { 
path = "./crates/pgls_lsp", version = "0.0.0" } pgls_markup = { path = "./crates/pgls_markup", version = "0.0.0" } +pgls_pglinter = { path = "./crates/pgls_pglinter", version = "0.0.0" } pgls_plpgsql_check = { path = "./crates/pgls_plpgsql_check", version = "0.0.0" } pgls_query = { path = "./crates/pgls_query", version = "0.0.0" } pgls_query_ext = { path = "./crates/pgls_query_ext", version = "0.0.0" } diff --git a/Dockerfile b/Dockerfile index 10353bb27..577e61843 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,15 +2,38 @@ FROM postgres:15 # Install build dependencies RUN apt-get update && \ - apt-get install -y postgresql-server-dev-15 gcc make git && \ + apt-get install -y postgresql-server-dev-15 gcc make git curl pkg-config libssl-dev libclang-dev clang && \ + # Install plpgsql_check (C extension - simple make install) cd /tmp && \ git clone https://github.com/okbob/plpgsql_check.git && \ cd plpgsql_check && \ make && \ make install && \ - apt-get remove -y postgresql-server-dev-15 gcc make git && \ + cd /tmp && \ + rm -rf /tmp/plpgsql_check && \ + # Install Rust for pglinter (pgrx-based extension) + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && \ + . 
$HOME/.cargo/env && \ + # Install cargo-pgrx (version must match pglinter's pgrx dependency) + cargo install cargo-pgrx --version 0.16.1 --locked && \ + # Initialize pgrx for PostgreSQL 15 + cargo pgrx init --pg15 $(which pg_config) && \ + # Clone and build pglinter + cd /tmp && \ + git clone https://github.com/pmpetit/pglinter.git && \ + cd pglinter && \ + cargo pgrx install --pg-config $(which pg_config) --release && \ + # Cleanup Rust and build dependencies + rm -rf /tmp/pglinter $HOME/.cargo $HOME/.rustup && \ + apt-get remove -y gcc make git curl pkg-config libssl-dev libclang-dev clang && \ apt-get autoremove -y && \ - rm -rf /tmp/plpgsql_check /var/lib/apt/lists/* + rm -rf /var/lib/apt/lists/* -# Add initialization script directly -RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file +# Add initialization script for extensions +# Only create in postgres database (NOT template1) to avoid polluting test databases +# Tests that need extensions can create them explicitly +RUN printf '%s\n' \ + "CREATE SCHEMA IF NOT EXISTS extensions;" \ + "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" \ + "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" \ + > /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file diff --git a/crates/pgls_cli/Cargo.toml b/crates/pgls_cli/Cargo.toml index 1b4cace9d..fa7fd8c82 100644 --- a/crates/pgls_cli/Cargo.toml +++ b/crates/pgls_cli/Cargo.toml @@ -52,8 +52,10 @@ mimalloc = "0.1.43" tikv-jemallocator = "0.6.0" [dev-dependencies] -assert_cmd = "2.0.16" -insta = { workspace = true, features = ["yaml"] } +assert_cmd = "2.0.16" +insta = { workspace = true, features = ["yaml"] } +pgls_test_utils = { workspace = true } +sqlx = { workspace = true } [lib] doctest = false diff --git a/crates/pgls_cli/src/commands/dblint.rs b/crates/pgls_cli/src/commands/dblint.rs index 592d26a25..b9ce13044 100644 --- 
a/crates/pgls_cli/src/commands/dblint.rs +++ b/crates/pgls_cli/src/commands/dblint.rs @@ -3,6 +3,7 @@ use std::time::Instant; use crate::cli_options::CliOptions; use crate::reporter::Report; use crate::{CliDiagnostic, CliSession, VcsIntegration}; +use pgls_analyse::RuleCategoriesBuilder; use pgls_configuration::PartialConfiguration; use pgls_diagnostics::Error; use pgls_workspace::features::diagnostics::{PullDatabaseDiagnosticsParams, PullDiagnosticsResult}; @@ -24,10 +25,17 @@ pub fn dblint( let start = Instant::now(); + let params = PullDatabaseDiagnosticsParams { + categories: RuleCategoriesBuilder::default().all().build(), + max_diagnostics, + only: Vec::new(), // Uses configuration settings + skip: Vec::new(), // Uses configuration settings + }; + let PullDiagnosticsResult { diagnostics, skipped_diagnostics, - } = workspace.pull_db_diagnostics(PullDatabaseDiagnosticsParams { max_diagnostics })?; + } = workspace.pull_db_diagnostics(params)?; let report = Report::new( diagnostics.into_iter().map(Error::from).collect(), diff --git a/crates/pgls_cli/src/commands/mod.rs b/crates/pgls_cli/src/commands/mod.rs index 3dac093e5..790f228e6 100644 --- a/crates/pgls_cli/src/commands/mod.rs +++ b/crates/pgls_cli/src/commands/mod.rs @@ -24,7 +24,7 @@ pub enum PgLSCommand { #[bpaf(command)] Version(#[bpaf(external(cli_options), hide_usage)] CliOptions), - /// Runs everything to the requested files. + /// Lints your database schema. 
#[bpaf(command)] Dblint { #[bpaf(external(partial_configuration), hide_usage, optional)] diff --git a/crates/pgls_cli/tests/assert_dblint.rs b/crates/pgls_cli/tests/assert_dblint.rs new file mode 100644 index 000000000..558449dae --- /dev/null +++ b/crates/pgls_cli/tests/assert_dblint.rs @@ -0,0 +1,117 @@ +use assert_cmd::Command; +use insta::assert_snapshot; +use sqlx::PgPool; +use std::process::ExitStatus; + +const BIN: &str = "postgres-language-server"; + +/// Get database URL from the pool's connect options +/// Uses the known docker-compose credentials (postgres:postgres) +fn get_database_url(pool: &PgPool) -> String { + let opts = pool.connect_options(); + format!( + "postgres://postgres:postgres@{}:{}/{}", + opts.get_host(), + opts.get_port(), + opts.get_database().unwrap_or("postgres") + ) +} + +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +#[cfg_attr( + target_os = "windows", + ignore = "snapshot expectations only validated on unix-like platforms" +)] +async fn dblint_empty_database_snapshot(test_db: PgPool) { + let url = get_database_url(&test_db); + let output = run_dblint(&url, &[]); + assert_snapshot!(output); +} + +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +#[cfg_attr( + target_os = "windows", + ignore = "snapshot expectations only validated on unix-like platforms" +)] +async fn dblint_detects_issues_snapshot(test_db: PgPool) { + // Setup: create table without primary key (triggers noPrimaryKey rule) + sqlx::raw_sql("CREATE TABLE test_no_pk (id int, name text)") + .execute(&test_db) + .await + .expect("Failed to create test table"); + + let url = get_database_url(&test_db); + let output = run_dblint(&url, &[]); + assert_snapshot!(output); +} + +#[test] +#[cfg_attr( + target_os = "windows", + ignore = "snapshot expectations only validated on unix-like platforms" +)] +fn dblint_no_database_snapshot() { + // Test that dblint completes gracefully when no database is configured + let mut cmd = Command::cargo_bin(BIN).expect("binary 
not built"); + let output = cmd + .args(["dblint", "--disable-db", "--log-level", "none"]) + .output() + .expect("failed to run CLI"); + + let normalized = normalize_output( + output.status, + &String::from_utf8_lossy(&output.stdout), + &String::from_utf8_lossy(&output.stderr), + ); + assert_snapshot!(normalized); +} + +fn run_dblint(url: &str, args: &[&str]) -> String { + let mut cmd = Command::cargo_bin(BIN).expect("binary not built"); + let mut full_args = vec!["dblint", "--connection-string", url, "--log-level", "none"]; + full_args.extend_from_slice(args); + + let output = cmd.args(full_args).output().expect("failed to run CLI"); + + normalize_output( + output.status, + &String::from_utf8_lossy(&output.stdout), + &String::from_utf8_lossy(&output.stderr), + ) +} + +fn normalize_output(status: ExitStatus, stdout: &str, stderr: &str) -> String { + let normalized_stdout = normalize_durations(stdout); + let status_label = if status.success() { + "success" + } else { + "failure" + }; + format!( + "status: {status_label}\nstdout:\n{}\nstderr:\n{}\n", + normalized_stdout.trim_end(), + stderr.trim_end() + ) +} + +fn normalize_durations(input: &str) -> String { + let mut content = input.to_owned(); + + let mut search_start = 0; + while let Some(relative) = content[search_start..].find(" in ") { + let start = search_start + relative + 4; + if let Some(end_rel) = content[start..].find('.') { + let end = start + end_rel; + if content[start..end].chars().any(|c| c.is_ascii_digit()) { + content.replace_range(start..end, ""); + search_start = start + "".len() + 1; + continue; + } + search_start = end + 1; + } else { + break; + } + } + + content +} diff --git a/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_detects_issues_snapshot.snap b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_detects_issues_snapshot.snap new file mode 100644 index 000000000..db0348a93 --- /dev/null +++ b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_detects_issues_snapshot.snap @@ 
-0,0 +1,20 @@ +--- +source: crates/pgls_cli/tests/assert_dblint.rs +expression: output +snapshot_kind: text +--- +status: success +stdout: +Warning: Deprecated config filename detected. Use 'postgres-language-server.jsonc'. + +Command completed in . +stderr: +splinter/performance/noPrimaryKey ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + i Table \`public.test_no_pk\` does not have a primary key + + Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale. + + i table: public.test_no_pk + + i Remediation: https://supabase.com/docs/guides/database/database-linter?lint=0004_no_primary_key diff --git a/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_empty_database_snapshot.snap b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_empty_database_snapshot.snap new file mode 100644 index 000000000..449797f39 --- /dev/null +++ b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_empty_database_snapshot.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgls_cli/tests/assert_dblint.rs +expression: output +snapshot_kind: text +--- +status: success +stdout: +Warning: Deprecated config filename detected. Use 'postgres-language-server.jsonc'. + +Command completed in . +stderr: diff --git a/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_no_database_snapshot.snap b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_no_database_snapshot.snap new file mode 100644 index 000000000..0cbb89765 --- /dev/null +++ b/crates/pgls_cli/tests/snapshots/assert_dblint__dblint_no_database_snapshot.snap @@ -0,0 +1,11 @@ +--- +source: crates/pgls_cli/tests/assert_dblint.rs +expression: normalized +snapshot_kind: text +--- +status: success +stdout: +Warning: Deprecated config filename detected. Use 'postgres-language-server.jsonc'. + +Command completed in . 
+stderr: diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap index 8ef51386a..fb155719f 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap index a21fe79fe..c6f434c5c 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap index 629e98bc6..b43206c35 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git 
a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap index 0d203a4c1..4b2df051d 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap index a6c1b9f57..34ab698ec 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap index ca024d628..cd43623d7 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap 
b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap index 9ccf98a9a..bbf3b1858 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap index 463bca8f4..e466f559a 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap index 76dc39d7e..3d938ecd1 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap 
b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap index 0d300f984..6ac18aaed 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap index a291aad9e..5fc56a8eb 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap index d3db86856..430fcaf36 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap 
b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap index 758932c93..59224dd38 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_configuration/src/lib.rs b/crates/pgls_configuration/src/lib.rs index 1e8872af0..3f2daad33 100644 --- a/crates/pgls_configuration/src/lib.rs +++ b/crates/pgls_configuration/src/lib.rs @@ -7,6 +7,7 @@ pub mod diagnostics; pub mod files; pub mod linter; pub mod migrations; +pub mod pglinter; pub mod plpgsql_check; pub mod rules; pub mod splinter; @@ -32,6 +33,9 @@ pub use linter::{ use migrations::{ MigrationsConfiguration, PartialMigrationsConfiguration, partial_migrations_configuration, }; +use pglinter::{ + PartialPglinterConfiguration, PglinterConfiguration, partial_pglinter_configuration, +}; use pgls_env::PGLS_WEBSITE; use plpgsql_check::{ PartialPlPgSqlCheckConfiguration, PlPgSqlCheckConfiguration, @@ -42,6 +46,9 @@ pub use rules::{ RuleWithFixOptions, RuleWithOptions, }; use serde::{Deserialize, Serialize}; +use splinter::{ + PartialSplinterConfiguration, SplinterConfiguration, partial_splinter_configuration, +}; pub use typecheck::{ PartialTypecheckConfiguration, TypecheckConfiguration, partial_typecheck_configuration, }; @@ -86,6 +93,14 @@ pub struct Configuration { #[partial(type, bpaf(external(partial_linter_configuration), optional))] pub linter: LinterConfiguration, + /// The configuration for splinter + #[partial(type, bpaf(external(partial_splinter_configuration), optional))] + pub splinter: SplinterConfiguration, + + /// The configuration for pglinter + #[partial(type, 
bpaf(external(partial_pglinter_configuration), optional))] + pub pglinter: PglinterConfiguration, + /// The configuration for type checking #[partial(type, bpaf(external(partial_typecheck_configuration), optional))] pub typecheck: TypecheckConfiguration, @@ -127,6 +142,14 @@ impl PartialConfiguration { }), ..Default::default() }), + splinter: Some(PartialSplinterConfiguration { + enabled: Some(true), + ..Default::default() + }), + pglinter: Some(PartialPglinterConfiguration { + enabled: Some(false), // Disabled by default since pglinter extension might not be installed + ..Default::default() + }), typecheck: Some(PartialTypecheckConfiguration { ..Default::default() }), diff --git a/crates/pgls_configuration/src/linter/rules.rs b/crates/pgls_configuration/src/linter/rules.rs index 1abb465f5..a78cf28c9 100644 --- a/crates/pgls_configuration/src/linter/rules.rs +++ b/crates/pgls_configuration/src/linter/rules.rs @@ -46,6 +46,7 @@ impl std::str::FromStr for RuleGroup { } #[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] #[cfg_attr(feature = "schema", derive(JsonSchema))] +#[cfg_attr(feature = "schema", schemars(rename = "LinterRules"))] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct Rules { #[doc = r" It enables the lint rules recommended by Postgres Language Server. `true` by default."] diff --git a/crates/pgls_configuration/src/pglinter/mod.rs b/crates/pgls_configuration/src/pglinter/mod.rs new file mode 100644 index 000000000..f676abf1f --- /dev/null +++ b/crates/pgls_configuration/src/pglinter/mod.rs @@ -0,0 +1,41 @@ +//! 
Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +mod rules; +use biome_deserialize_macros::{Merge, Partial}; +use bpaf::Bpaf; +pub use rules::*; +use serde::{Deserialize, Serialize}; +#[derive(Clone, Debug, Deserialize, Eq, Partial, PartialEq, Serialize)] +#[partial(derive(Bpaf, Clone, Eq, Merge, PartialEq))] +#[partial(cfg_attr(feature = "schema", derive(schemars::JsonSchema)))] +#[partial(serde(rename_all = "camelCase", default, deny_unknown_fields))] +pub struct PglinterConfiguration { + #[doc = r" if `false`, it disables the feature and the linter won't be executed. `true` by default"] + #[partial(bpaf(hide))] + pub enabled: bool, + #[doc = r" List of rules"] + #[partial(bpaf(pure(Default::default()), optional, hide))] + pub rules: Rules, +} +impl PglinterConfiguration { + pub const fn is_disabled(&self) -> bool { + !self.enabled + } +} +impl Default for PglinterConfiguration { + fn default() -> Self { + Self { + enabled: true, + rules: Default::default(), + } + } +} +impl PartialPglinterConfiguration { + pub const fn is_disabled(&self) -> bool { + matches!(self.enabled, Some(false)) + } + pub fn get_rules(&self) -> Rules { + self.rules.clone().unwrap_or_default() + } +} diff --git a/crates/pgls_configuration/src/pglinter/rules.rs b/crates/pgls_configuration/src/pglinter/rules.rs new file mode 100644 index 000000000..b22721a80 --- /dev/null +++ b/crates/pgls_configuration/src/pglinter/rules.rs @@ -0,0 +1,916 @@ +//! 
Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rules::{RuleConfiguration, RulePlainConfiguration}; +use biome_deserialize_macros::Merge; +use pgls_analyse::RuleFilter; +use pgls_analyser::RuleOptions; +use pgls_diagnostics::{Category, Severity}; +use rustc_hash::FxHashSet; +#[cfg(feature = "schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[derive( + Clone, + Copy, + Debug, + Eq, + Hash, + Merge, + Ord, + PartialEq, + PartialOrd, + serde :: Deserialize, + serde :: Serialize, +)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase")] +pub enum RuleGroup { + Base, + Cluster, + Schema, +} +impl RuleGroup { + pub const fn as_str(self) -> &'static str { + match self { + Self::Base => Base::GROUP_NAME, + Self::Cluster => Cluster::GROUP_NAME, + Self::Schema => Schema::GROUP_NAME, + } + } +} +impl std::str::FromStr for RuleGroup { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + Base::GROUP_NAME => Ok(Self::Base), + Cluster::GROUP_NAME => Ok(Self::Cluster), + Schema::GROUP_NAME => Ok(Self::Schema), + _ => Err("This rule group doesn't exist."), + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[cfg_attr(feature = "schema", schemars(rename = "PglinterRules"))] +#[serde(rename_all = "camelCase", deny_unknown_fields)] +pub struct Rules { + #[doc = r" It enables the lint rules recommended by Postgres Language Server. `true` by default."] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules. 
The rules that belong to `nursery` won't be enabled."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub cluster: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option, +} +impl Rules { + #[doc = r" Checks if the code coming from [pgls_diagnostics::Diagnostic] corresponds to a rule."] + #[doc = r" Usually the code is built like {group}/{rule_name}"] + pub fn has_rule(group: RuleGroup, rule_name: &str) -> Option<&'static str> { + match group { + RuleGroup::Base => Base::has_rule(rule_name), + RuleGroup::Cluster => Cluster::has_rule(rule_name), + RuleGroup::Schema => Schema::has_rule(rule_name), + } + } + #[doc = r" Given a category coming from [Diagnostic](pgls_diagnostics::Diagnostic), this function returns"] + #[doc = r" the [Severity](pgls_diagnostics::Severity) associated to the rule, if the configuration changed it."] + #[doc = r" If the severity is off or not set, then the function returns the default severity of the rule,"] + #[doc = r" which is configured at the rule definition."] + #[doc = r" The function can return `None` if the rule is not properly configured."] + pub fn get_severity_from_code(&self, category: &Category) -> Option { + let mut split_code = category.name().split('/'); + let _category_prefix = split_code.next(); + debug_assert_eq!(_category_prefix, Some("pglinter")); + let group = ::from_str(split_code.next()?).ok()?; + let rule_name = split_code.next()?; + let rule_name = Self::has_rule(group, rule_name)?; + let severity = match group { + RuleGroup::Base => self + .base + .as_ref() + .and_then(|group| group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Base::severity(rule_name), |(level, _)| level.into()), + RuleGroup::Cluster => self + .cluster + .as_ref() + .and_then(|group| 
group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Cluster::severity(rule_name), |(level, _)| level.into()), + RuleGroup::Schema => self + .schema + .as_ref() + .and_then(|group| group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Schema::severity(rule_name), |(level, _)| level.into()), + }; + Some(severity) + } + #[doc = r" Ensure that `recommended` is set to `true` or implied."] + pub fn set_recommended(&mut self) { + if self.all != Some(true) && self.recommended == Some(false) { + self.recommended = Some(true) + } + if let Some(group) = &mut self.base { + group.recommended = None; + } + if let Some(group) = &mut self.cluster { + group.recommended = None; + } + if let Some(group) = &mut self.schema { + group.recommended = None; + } + } + pub(crate) const fn is_recommended_false(&self) -> bool { + matches!(self.recommended, Some(false)) + } + pub(crate) const fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + #[doc = r" It returns the enabled rules by default."] + #[doc = r""] + #[doc = r" The enabled rules are calculated from the difference with the disabled rules."] + pub fn as_enabled_rules(&self) -> FxHashSet> { + let mut enabled_rules = FxHashSet::default(); + let mut disabled_rules = FxHashSet::default(); + if let Some(group) = self.base.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + &mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Base::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Base::recommended_rules_as_filters()); + } + if let Some(group) = self.cluster.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + 
&mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Cluster::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Cluster::recommended_rules_as_filters()); + } + if let Some(group) = self.schema.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + &mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Schema::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Schema::recommended_rules_as_filters()); + } + enabled_rules.difference(&disabled_rules).copied().collect() + } + #[doc = r" It returns the disabled rules by configuration."] + pub fn as_disabled_rules(&self) -> FxHashSet> { + let mut disabled_rules = FxHashSet::default(); + if let Some(group) = self.base.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + if let Some(group) = self.cluster.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + if let Some(group) = self.schema.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + disabled_rules + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that belong to this group"] +pub struct Base { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys 
involving more than 4 columns"] + #[serde(skip_serializing_if = "Option::is_none")] + pub composite_primary_key_too_many_columns: Option>, + #[doc = "HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_objects_with_uppercase: Option>, + #[doc = "HowManyRedudantIndex (B002): Count number of redundant index vs nb index."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_redudant_index: Option>, + #[doc = "HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_table_without_index_on_fk: Option>, + #[doc = "HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_table_without_primary_key: Option>, + #[doc = "HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_never_selected: Option>, + #[doc = "HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_fk_mismatch: Option>, + #[doc = "HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_fk_outside_schema: Option>, + #[doc = "HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_reserved_keywords: Option>, + #[doc = "HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers."] + #[serde(skip_serializing_if = 
"Option::is_none")] + pub how_many_tables_with_same_trigger: Option>, + #[doc = "HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)"] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_unused_index: Option>, + #[doc = "SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners."] + #[serde(skip_serializing_if = "Option::is_none")] + pub several_table_owner_in_schema: Option>, +} +impl Base { + const GROUP_NAME: &'static str = "base"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "compositePrimaryKeyTooManyColumns", + "howManyObjectsWithUppercase", + "howManyRedudantIndex", + "howManyTableWithoutIndexOnFk", + "howManyTableWithoutPrimaryKey", + "howManyTablesNeverSelected", + "howManyTablesWithFkMismatch", + "howManyTablesWithFkOutsideSchema", + "howManyTablesWithReservedKeywords", + "howManyTablesWithSameTrigger", + "howManyUnusedIndex", + "severalTableOwnerInSchema", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + 
RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + ]; + #[doc = r" Retrieves the recommended rules"] + pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.composite_primary_key_too_many_columns.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.how_many_objects_with_uppercase.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.how_many_redudant_index.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.how_many_table_without_index_on_fk.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.how_many_table_without_primary_key.as_ref() { + if rule.is_enabled() { + 
index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.how_many_tables_never_selected.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_mismatch.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_outside_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.how_many_tables_with_reserved_keywords.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.how_many_tables_with_same_trigger.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.how_many_unused_index.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.several_table_owner_in_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + index_set + } + pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.composite_primary_key_too_many_columns.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.how_many_objects_with_uppercase.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.how_many_redudant_index.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, 
Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.how_many_table_without_index_on_fk.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.how_many_table_without_primary_key.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.how_many_tables_never_selected.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_mismatch.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_outside_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.how_many_tables_with_reserved_keywords.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.how_many_tables_with_same_trigger.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.how_many_unused_index.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.several_table_owner_in_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { + Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> 
&'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "compositePrimaryKeyTooManyColumns" => Severity::Warning, + "howManyObjectsWithUppercase" => Severity::Warning, + "howManyRedudantIndex" => Severity::Warning, + "howManyTableWithoutIndexOnFk" => Severity::Warning, + "howManyTableWithoutPrimaryKey" => Severity::Warning, + "howManyTablesNeverSelected" => Severity::Warning, + "howManyTablesWithFkMismatch" => Severity::Warning, + "howManyTablesWithFkOutsideSchema" => Severity::Warning, + "howManyTablesWithReservedKeywords" => Severity::Warning, + "howManyTablesWithSameTrigger" => Severity::Warning, + "howManyUnusedIndex" => Severity::Warning, + "severalTableOwnerInSchema" => Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "compositePrimaryKeyTooManyColumns" => self + .composite_primary_key_too_many_columns + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyObjectsWithUppercase" => self + .how_many_objects_with_uppercase + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyRedudantIndex" => self + .how_many_redudant_index + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + 
"howManyTableWithoutIndexOnFk" => self + .how_many_table_without_index_on_fk + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTableWithoutPrimaryKey" => self + .how_many_table_without_primary_key + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesNeverSelected" => self + .how_many_tables_never_selected + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithFkMismatch" => self + .how_many_tables_with_fk_mismatch + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithFkOutsideSchema" => self + .how_many_tables_with_fk_outside_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithReservedKeywords" => self + .how_many_tables_with_reserved_keywords + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithSameTrigger" => self + .how_many_tables_with_same_trigger + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyUnusedIndex" => self + .how_many_unused_index + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "severalTableOwnerInSchema" => self + .several_table_owner_in_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that belong to this group"] +pub struct Cluster { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. 
Warning, you will need to reset all passwords after this is changed to scram-sha-256."] + #[serde(skip_serializing_if = "Option::is_none")] + pub password_encryption_is_md5: Option>, + #[doc = "PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."] + #[serde(skip_serializing_if = "Option::is_none")] + pub pg_hba_entries_with_method_trust_or_password_should_not_exists: + Option>, + #[doc = "PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."] + #[serde(skip_serializing_if = "Option::is_none")] + pub pg_hba_entries_with_method_trust_should_not_exists: Option>, +} +impl Cluster { + const GROUP_NAME: &'static str = "cluster"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "passwordEncryptionIsMd5", + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + "pgHbaEntriesWithMethodTrustShouldNotExists", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + ]; + #[doc = r" Retrieves the recommended rules"] + 
pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.password_encryption_is_md5.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + index_set + } + pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.password_encryption_is_md5.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { + 
Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "passwordEncryptionIsMd5" => Severity::Warning, + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Severity::Warning, + "pgHbaEntriesWithMethodTrustShouldNotExists" => Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "passwordEncryptionIsMd5" => self + .password_encryption_is_md5 + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "pgHbaEntriesWithMethodTrustShouldNotExists" => self + .pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that 
belong to this group"] +pub struct Schema { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary)."] + #[serde(skip_serializing_if = "Option::is_none")] + pub owner_schema_is_internal_role: Option>, + #[doc = "SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_owner_do_not_match_table_owner: Option>, + #[doc = "SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_prefixed_or_suffixed_with_envt: Option>, + #[doc = "SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_with_default_role_not_granted: Option>, + #[doc = "UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects."] + #[serde(skip_serializing_if = "Option::is_none")] + pub unsecured_public_schema: Option>, +} +impl Schema { + const GROUP_NAME: &'static str = "schema"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "ownerSchemaIsInternalRole", + "schemaOwnerDoNotMatchTableOwner", + "schemaPrefixedOrSuffixedWithEnvt", + "schemaWithDefaultRoleNotGranted", + "unsecuredPublicSchema", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + ]; + #[doc = r" Retrieves the recommended rules"] + pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.owner_schema_is_internal_role.as_ref() { + if rule.is_enabled() { + 
index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.schema_owner_do_not_match_table_owner.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.schema_prefixed_or_suffixed_with_envt.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.schema_with_default_role_not_granted.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.unsecured_public_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + index_set + } + pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.owner_schema_is_internal_role.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.schema_owner_do_not_match_table_owner.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.schema_prefixed_or_suffixed_with_envt.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.schema_with_default_role_not_granted.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.unsecured_public_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn 
has_rule(rule_name: &str) -> Option<&'static str> { + Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "ownerSchemaIsInternalRole" => Severity::Warning, + "schemaOwnerDoNotMatchTableOwner" => Severity::Warning, + "schemaPrefixedOrSuffixedWithEnvt" => Severity::Warning, + "schemaWithDefaultRoleNotGranted" => Severity::Warning, + "unsecuredPublicSchema" => Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "ownerSchemaIsInternalRole" => self + .owner_schema_is_internal_role + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaOwnerDoNotMatchTableOwner" => self + .schema_owner_do_not_match_table_owner + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaPrefixedOrSuffixedWithEnvt" => self + .schema_prefixed_or_suffixed_with_envt + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaWithDefaultRoleNotGranted" => self + .schema_with_default_role_not_granted + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "unsecuredPublicSchema" => self + 
.unsecured_public_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[doc = r" Push the configured rules to the analyser"] +pub fn push_to_analyser_rules( + rules: &Rules, + metadata: &pgls_analyse::MetadataRegistry, + analyser_rules: &mut pgls_analyser::LinterRules, +) { + if let Some(rules) = rules.base.as_ref() { + for rule_name in Base::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("base", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } + if let Some(rules) = rules.cluster.as_ref() { + for rule_name in Cluster::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("cluster", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } + if let Some(rules) = rules.schema.as_ref() { + for rule_name in Schema::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("schema", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } +} +#[test] +fn test_order() { + for items in Base::GROUP_RULES.windows(2) { + assert!(items[0] < items[1], "{} < {}", items[0], items[1]); + } + for items in Cluster::GROUP_RULES.windows(2) { + assert!(items[0] < items[1], "{} < {}", items[0], items[1]); + } + for items in Schema::GROUP_RULES.windows(2) { + assert!(items[0] < items[1], "{} < {}", items[0], items[1]); + } +} diff --git a/crates/pgls_configuration/src/rules/selector.rs b/crates/pgls_configuration/src/rules/selector.rs index 4627e2388..e206fff25 100644 --- a/crates/pgls_configuration/src/rules/selector.rs +++ b/crates/pgls_configuration/src/rules/selector.rs @@ -2,11 +2,12 @@ use pgls_analyse::RuleFilter; use std::str::FromStr; -/// Represents a rule group from any analyzer (linter or 
splinter) +/// Represents a rule group from any analyzer (linter, splinter, or pglinter) #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub enum AnalyzerGroup { Linter(crate::linter::RuleGroup), Splinter(crate::splinter::RuleGroup), + PgLinter(crate::pglinter::RuleGroup), } impl AnalyzerGroup { @@ -14,6 +15,7 @@ impl AnalyzerGroup { match self { Self::Linter(group) => group.as_str(), Self::Splinter(group) => group.as_str(), + Self::PgLinter(group) => group.as_str(), } } @@ -21,6 +23,7 @@ impl AnalyzerGroup { match self { Self::Linter(_) => "lint", Self::Splinter(_) => "splinter", + Self::PgLinter(_) => "pglinter", } } } @@ -57,6 +60,8 @@ impl FromStr for RuleSelector { ("lint", rest) } else if let Some(rest) = selector.strip_prefix("splinter/") { ("splinter", rest) + } else if let Some(rest) = selector.strip_prefix("pglinter/") { + ("pglinter", rest) } else { // Default to lint for backward compatibility ("lint", selector) @@ -84,6 +89,17 @@ impl FromStr for RuleSelector { Err("This rule doesn't exist.") } } + "pglinter" => { + let group = crate::pglinter::RuleGroup::from_str(group_name)?; + if let Some(rule_name) = crate::pglinter::Rules::has_rule(group, rule_name) { + Ok(RuleSelector::Rule( + AnalyzerGroup::PgLinter(group), + rule_name, + )) + } else { + Err("This rule doesn't exist.") + } + } _ => Err("Unknown analyzer type."), } } else { @@ -101,6 +117,12 @@ impl FromStr for RuleSelector { "This group doesn't exist. Use the syntax `/` to specify a rule.", ), }, + "pglinter" => match crate::pglinter::RuleGroup::from_str(rest) { + Ok(group) => Ok(RuleSelector::Group(AnalyzerGroup::PgLinter(group))), + Err(_) => Err( + "This group doesn't exist. 
Use the syntax `/` to specify a rule.", + ), + }, _ => Err("Unknown analyzer type."), } } diff --git a/crates/pgls_configuration/src/splinter/mod.rs b/crates/pgls_configuration/src/splinter/mod.rs index def25610d..fc4d1ee1e 100644 --- a/crates/pgls_configuration/src/splinter/mod.rs +++ b/crates/pgls_configuration/src/splinter/mod.rs @@ -2,7 +2,6 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] mod rules; -use biome_deserialize::StringSet; use biome_deserialize_macros::{Merge, Partial}; use bpaf::Bpaf; pub use rules::*; @@ -18,12 +17,6 @@ pub struct SplinterConfiguration { #[doc = r" List of rules"] #[partial(bpaf(pure(Default::default()), optional, hide))] pub rules: Rules, - #[doc = r" A list of Unix shell style patterns. The linter will ignore files/folders that will match these patterns."] - #[partial(bpaf(hide))] - pub ignore: StringSet, - #[doc = r" A list of Unix shell style patterns. The linter will include files/folders that will match these patterns."] - #[partial(bpaf(hide))] - pub include: StringSet, } impl SplinterConfiguration { pub const fn is_disabled(&self) -> bool { @@ -35,8 +28,6 @@ impl Default for SplinterConfiguration { Self { enabled: true, rules: Default::default(), - ignore: Default::default(), - include: Default::default(), } } } diff --git a/crates/pgls_configuration/src/splinter/rules.rs b/crates/pgls_configuration/src/splinter/rules.rs index 175623034..f38b54530 100644 --- a/crates/pgls_configuration/src/splinter/rules.rs +++ b/crates/pgls_configuration/src/splinter/rules.rs @@ -49,6 +49,7 @@ impl std::str::FromStr for RuleGroup { } #[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] #[cfg_attr(feature = "schema", derive(JsonSchema))] +#[cfg_attr(feature = "schema", schemars(rename = "SplinterRules"))] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct Rules { #[doc = r" It enables the lint rules recommended by Postgres Language Server. 
`true` by default."] @@ -177,25 +178,25 @@ pub struct Performance { #[doc = r" It enables ALL rules for this group."] #[serde(skip_serializing_if = "Option::is_none")] pub all: Option, - #[doc = "/// # Auth RLS Initialization Plan /// /// Detects if calls to `current_setting()` and `auth.()` in RLS policies are being unnecessarily re-evaluated for each row /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// with policies as ( /// select /// nsp.nspname as schema_name, /// pb.tablename as table_name, /// pc.relrowsecurity as is_rls_active, /// polname as policy_name, /// polpermissive as is_permissive, -- if not, then restrictive /// (select array_agg(r::regrole) from unnest(polroles) as x(r)) as roles, /// case polcmd /// when 'r' then 'SELECT' /// when 'a' then 'INSERT' /// when 'w' then 'UPDATE' /// when 'd' then 'DELETE' /// when '*' then 'ALL' /// end as command, /// qual, /// with_check /// from /// pg_catalog.pg_policy pa /// join pg_catalog.pg_class pc /// on pa.polrelid = pc.oid /// join pg_catalog.pg_namespace nsp /// on pc.relnamespace = nsp.oid /// join pg_catalog.pg_policies pb /// on pc.relname = pb.tablename /// and nsp.nspname = pb.schemaname /// and pa.polname = pb.policyname /// ) /// select /// 'auth_rls_initplan' as \"name!\", /// 'Auth RLS Initialization Plan' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects if calls to \\`current_setting()\\` and \\`auth.\\()\\` in RLS policies are being unnecessarily re-evaluated for each row' as \"description!\", /// format( /// 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that re-evaluates current_setting() or auth.\\() for each row. This produces suboptimal query performance at scale. 
Resolve the issue by replacing \\`auth.\\()\\` with \\`(select auth.\\())\\`. See \\[docs](https://supabase.com/docs/guides/database/postgres/row-level-security#call-functions-with-select) for more info.', /// schema_name, /// table_name, /// policy_name /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan' as \"remediation!\", /// jsonb_build_object( /// 'schema', schema_name, /// 'name', table_name, /// 'type', 'table' /// ) as \"metadata!\", /// format('auth_rls_init_plan_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\" /// from /// policies /// where /// is_rls_active /// -- NOTE: does not include realtime in support of monitoring policies on realtime.messages /// and schema_name not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and ( /// -- Example: auth.uid() /// ( /// qual like '%auth.uid()%' /// and lower(qual) not like '%select auth.uid()%' /// ) /// or ( /// qual like '%auth.jwt()%' /// and lower(qual) not like '%select auth.jwt()%' /// ) /// or ( /// qual like '%auth.role()%' /// and lower(qual) not like '%select auth.role()%' /// ) /// or ( /// qual like '%auth.email()%' /// and lower(qual) not like '%select auth.email()%' /// ) /// or ( /// qual like '%current\\_setting(%)%' /// and lower(qual) not like '%select current\\_setting(%)%' /// ) /// or ( /// with_check like '%auth.uid()%' /// and lower(with_check) not like '%select auth.uid()%' /// ) /// or ( /// with_check like '%auth.jwt()%' /// and lower(with_check) not like '%select auth.jwt()%' /// ) /// or ( /// with_check like '%auth.role()%' /// and lower(with_check) not like '%select 
auth.role()%' /// ) /// or ( /// with_check like '%auth.email()%' /// and lower(with_check) not like '%select auth.email()%' /// ) /// or ( /// with_check like '%current\\_setting(%)%' /// and lower(with_check) not like '%select current\\_setting(%)%' /// ) /// )) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"authRlsInitplan\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan"] + #[doc = "Auth RLS Initialization Plan: Detects if calls to `current_setting()` and `auth.()` in RLS policies are being unnecessarily re-evaluated for each row"] #[serde(skip_serializing_if = "Option::is_none")] pub auth_rls_initplan: Option>, - #[doc = "/// # Duplicate Index /// /// Detects cases where two ore more identical indexes exist. /// /// ## SQL Query /// /// sql /// ( /// select /// 'duplicate_index' as \"name!\", /// 'Duplicate Index' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects cases where two ore more identical indexes exist.' as \"description!\", /// format( /// 'Table \\`%s.%s\\` has identical indexes %s. 
Drop all except one of them', /// n.nspname, /// c.relname, /// array_agg(pi.indexname order by pi.indexname) /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', case /// when c.relkind = 'r' then 'table' /// when c.relkind = 'm' then 'materialized view' /// else 'ERROR' /// end, /// 'indexes', array_agg(pi.indexname order by pi.indexname) /// ) as \"metadata!\", /// format( /// 'duplicate_index_%s_%s_%s', /// n.nspname, /// c.relname, /// array_agg(pi.indexname order by pi.indexname) /// ) as \"cache_key!\" /// from /// pg_catalog.pg_indexes pi /// join pg_catalog.pg_namespace n /// on n.nspname = pi.schemaname /// join pg_catalog.pg_class c /// on pi.tablename = c.relname /// and n.oid = c.relnamespace /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind in ('r', 'm') -- tables and materialized views /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null -- exclude tables owned by extensions /// group by /// n.nspname, /// c.relkind, /// c.relname, /// replace(pi.indexdef, pi.indexname, '') /// having /// count(*) > 1) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"duplicateIndex\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: 
https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index"] + #[doc = "Duplicate Index: Detects cases where two ore more identical indexes exist."] #[serde(skip_serializing_if = "Option::is_none")] pub duplicate_index: Option>, - #[doc = "/// # Multiple Permissive Policies /// /// Detects if multiple permissive row level security policies are present on a table for the same `role` and `action` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query. /// /// ## SQL Query /// /// sql /// ( /// select /// 'multiple_permissive_policies' as \"name!\", /// 'Multiple Permissive Policies' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.' as \"description!\", /// format( /// 'Table \\`%s.%s\\` has multiple permissive policies for role \\`%s\\` for action \\`%s\\`. 
Policies include \\`%s\\`', /// n.nspname, /// c.relname, /// r.rolname, /// act.cmd, /// array_agg(p.polname order by p.polname) /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0006_multiple_permissive_policies' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'multiple_permissive_policies_%s_%s_%s_%s', /// n.nspname, /// c.relname, /// r.rolname, /// act.cmd /// ) as \"cache_key!\" /// from /// pg_catalog.pg_policy p /// join pg_catalog.pg_class c /// on p.polrelid = c.oid /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// join pg_catalog.pg_roles r /// on p.polroles @> array\\[r.oid] /// or p.polroles = array\\[0::oid] /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e', /// lateral ( /// select x.cmd /// from unnest(( /// select /// case p.polcmd /// when 'r' then array\\['SELECT'] /// when 'a' then array\\['INSERT'] /// when 'w' then array\\['UPDATE'] /// when 'd' then array\\['DELETE'] /// when '*' then array\\['SELECT', 'INSERT', 'UPDATE', 'DELETE'] /// else array\\['ERROR'] /// end as actions /// )) x(cmd) /// ) act(cmd) /// where /// c.relkind = 'r' -- regular tables /// and p.polpermissive -- policy is permissive /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and r.rolname not like 'pg_%' /// and r.rolname not like 'supabase%admin' /// and not r.rolbypassrls /// and dep.objid is null -- exclude tables owned by extensions /// group by /// n.nspname, /// c.relname, /// r.rolname, /// 
act.cmd /// having /// count(1) > 1) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"multiplePermissivePolicies\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0006_multiple_permissive_policies"] + #[doc = "Multiple Permissive Policies: Detects if multiple permissive row level security policies are present on a table for the same `role` and `action` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query."] #[serde(skip_serializing_if = "Option::is_none")] pub multiple_permissive_policies: Option>, - #[doc = "/// # No Primary Key /// /// Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale. /// /// ## SQL Query /// /// sql /// ( /// select /// 'no_primary_key' as \"name!\", /// 'No Primary Key' as \"title!\", /// 'INFO' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale.' 
as \"description!\", /// format( /// 'Table \\`%s.%s\\` does not have a primary key', /// pgns.nspname, /// pgc.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0004_no_primary_key' as \"remediation!\", /// jsonb_build_object( /// 'schema', pgns.nspname, /// 'name', pgc.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'no_primary_key_%s_%s', /// pgns.nspname, /// pgc.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class pgc /// join pg_catalog.pg_namespace pgns /// on pgns.oid = pgc.relnamespace /// left join pg_catalog.pg_index pgi /// on pgi.indrelid = pgc.oid /// left join pg_catalog.pg_depend dep /// on pgc.oid = dep.objid /// and dep.deptype = 'e' /// where /// pgc.relkind = 'r' -- regular tables /// and pgns.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null -- exclude tables owned by extensions /// group by /// pgc.oid, /// pgns.nspname, /// pgc.relname /// having /// max(coalesce(pgi.indisprimary, false)::int) = 0) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"noPrimaryKey\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0004_no_primary_key"] + #[doc = "No Primary Key: Detects if a table does not have a primary key. 
Tables without a primary key can be inefficient to interact with at scale."] #[serde(skip_serializing_if = "Option::is_none")] pub no_primary_key: Option>, - #[doc = "/// # Table Bloat /// /// Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster. /// /// ## SQL Query /// /// sql /// ( /// with constants as ( /// select current_setting('block_size')::numeric as bs, 23 as hdr, 4 as ma /// ), /// /// bloat_info as ( /// select /// ma, /// bs, /// schemaname, /// tablename, /// (datawidth + (hdr + ma - (case when hdr % ma = 0 then ma else hdr % ma end)))::numeric as datahdr, /// (maxfracsum * (nullhdr + ma - (case when nullhdr % ma = 0 then ma else nullhdr % ma end))) as nullhdr2 /// from ( /// select /// schemaname, /// tablename, /// hdr, /// ma, /// bs, /// sum((1 - null_frac) * avg_width) as datawidth, /// max(null_frac) as maxfracsum, /// hdr + ( /// select 1 + count(*) / 8 /// from pg_stats s2 /// where /// null_frac \\<> 0 /// and s2.schemaname = s.schemaname /// and s2.tablename = s.tablename /// ) as nullhdr /// from pg_stats s, constants /// group by 1, 2, 3, 4, 5 /// ) as foo /// ), /// /// table_bloat as ( /// select /// schemaname, /// tablename, /// cc.relpages, /// bs, /// ceil((cc.reltuples * ((datahdr + ma - /// (case when datahdr % ma = 0 then ma else datahdr % ma end)) + nullhdr2 + 4)) / (bs - 20::float)) as otta /// from /// bloat_info /// join pg_class cc /// on cc.relname = bloat_info.tablename /// join pg_namespace nn /// on cc.relnamespace = nn.oid /// and nn.nspname = bloat_info.schemaname /// and nn.nspname \\<> 'information_schema' /// where /// cc.relkind = 'r' /// and cc.relam = (select oid from pg_am where amname = 'heap') /// ), /// /// bloat_data as ( /// select /// 'table' as type, /// schemaname, /// tablename as object_name, /// round(case when otta = 0 then 0.0 else table_bloat.relpages / otta::numeric end, 1) as bloat, /// case when relpages \\< otta then 0 else (bs * 
(table_bloat.relpages - otta)::bigint)::bigint end as raw_waste /// from /// table_bloat /// ) /// /// select /// 'table_bloat' as \"name!\", /// 'Table Bloat' as \"title!\", /// 'INFO' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.' as \"description!\", /// format( /// 'Table `%s`.`%s` has excessive bloat', /// bloat_data.schemaname, /// bloat_data.object_name /// ) as \"detail!\", /// 'Consider running vacuum full (WARNING: incurs downtime) and tweaking autovacuum settings to reduce bloat.' as \"remediation!\", /// jsonb_build_object( /// 'schema', bloat_data.schemaname, /// 'name', bloat_data.object_name, /// 'type', bloat_data.type /// ) as \"metadata!\", /// format( /// 'table_bloat_%s_%s', /// bloat_data.schemaname, /// bloat_data.object_name /// ) as \"cache_key!\" /// from /// bloat_data /// where /// bloat > 70.0 /// and raw_waste > (20 * 1024 * 1024) -- filter for waste > 200 MB /// order by /// schemaname, /// object_name) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"tableBloat\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: "] + #[doc = "Table Bloat: Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster."] #[serde(skip_serializing_if = "Option::is_none")] pub table_bloat: Option>, - #[doc = "/// # Unindexed foreign keys /// /// Identifies foreign key constraints without a covering index, which can impact database performance. 
/// /// ## SQL Query /// /// sql /// with foreign_keys as ( /// select /// cl.relnamespace::regnamespace::text as schema_name, /// cl.relname as table_name, /// cl.oid as table_oid, /// ct.conname as fkey_name, /// ct.conkey as col_attnums /// from /// pg_catalog.pg_constraint ct /// join pg_catalog.pg_class cl -- fkey owning table /// on ct.conrelid = cl.oid /// left join pg_catalog.pg_depend d /// on d.objid = cl.oid /// and d.deptype = 'e' /// where /// ct.contype = 'f' -- foreign key constraints /// and d.objid is null -- exclude tables that are dependencies of extensions /// and cl.relnamespace::regnamespace::text not in ( /// 'pg_catalog', 'information_schema', 'auth', 'storage', 'vault', 'extensions' /// ) /// ), /// index_ as ( /// select /// pi.indrelid as table_oid, /// indexrelid::regclass as index_, /// string_to_array(indkey::text, ' ')::smallint\\[] as col_attnums /// from /// pg_catalog.pg_index pi /// where /// indisvalid /// ) /// select /// 'unindexed_foreign_keys' as \"name!\", /// 'Unindexed foreign keys' as \"title!\", /// 'INFO' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Identifies foreign key constraints without a covering index, which can impact database performance.' as \"description!\", /// format( /// 'Table `%s.%s` has a foreign key `%s` without a covering index. 
This can lead to suboptimal query performance.', /// fk.schema_name, /// fk.table_name, /// fk.fkey_name /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0001_unindexed_foreign_keys' as \"remediation!\", /// jsonb_build_object( /// 'schema', fk.schema_name, /// 'name', fk.table_name, /// 'type', 'table', /// 'fkey_name', fk.fkey_name, /// 'fkey_columns', fk.col_attnums /// ) as \"metadata!\", /// format('unindexed_foreign_keys_%s_%s_%s', fk.schema_name, fk.table_name, fk.fkey_name) as \"cache_key!\" /// from /// foreign_keys fk /// left join index_ idx /// on fk.table_oid = idx.table_oid /// and fk.col_attnums = idx.col_attnums\\[1:array_length(fk.col_attnums, 1)] /// left join pg_catalog.pg_depend dep /// on idx.table_oid = dep.objid /// and dep.deptype = 'e' /// where /// idx.index_ is null /// and fk.schema_name not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null -- exclude tables owned by extensions /// order by /// fk.schema_name, /// fk.table_name, /// fk.fkey_name /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"unindexedForeignKeys\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0001_unindexed_foreign_keys"] + #[doc = "Unindexed foreign keys: Identifies foreign key constraints without a covering index, which can impact database performance."] #[serde(skip_serializing_if = "Option::is_none")] pub unindexed_foreign_keys: Option>, - #[doc 
= "/// # Unused Index /// /// Detects if an index has never been used and may be a candidate for removal. /// /// ## SQL Query /// /// sql /// ( /// select /// 'unused_index' as \"name!\", /// 'Unused Index' as \"title!\", /// 'INFO' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['PERFORMANCE'] as \"categories!\", /// 'Detects if an index has never been used and may be a candidate for removal.' as \"description!\", /// format( /// 'Index \\`%s\\` on table \\`%s.%s\\` has not been used', /// psui.indexrelname, /// psui.schemaname, /// psui.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0005_unused_index' as \"remediation!\", /// jsonb_build_object( /// 'schema', psui.schemaname, /// 'name', psui.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'unused_index_%s_%s_%s', /// psui.schemaname, /// psui.relname, /// psui.indexrelname /// ) as \"cache_key!\" /// /// from /// pg_catalog.pg_stat_user_indexes psui /// join pg_catalog.pg_index pi /// on psui.indexrelid = pi.indexrelid /// left join pg_catalog.pg_depend dep /// on psui.relid = dep.objid /// and dep.deptype = 'e' /// where /// psui.idx_scan = 0 /// and not pi.indisunique /// and not pi.indisprimary /// and dep.objid is null -- exclude tables owned by extensions /// and psui.schemaname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// )) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"performance\": { /// \"unusedIndex\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// 
See: https://supabase.com/docs/guides/database/database-linter?lint=0005_unused_index"] + #[doc = "Unused Index: Detects if an index has never been used and may be a candidate for removal."] #[serde(skip_serializing_if = "Option::is_none")] pub unused_index: Option>, } @@ -210,7 +211,15 @@ impl Performance { "unindexedForeignKeys", "unusedIndex", ]; - const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + ]; const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), @@ -396,46 +405,46 @@ pub struct Security { #[doc = r" It enables ALL rules for this group."] #[serde(skip_serializing_if = "Option::is_none")] pub all: Option, - #[doc = "/// # Exposed Auth Users /// /// Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security. /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. 
/// /// ## SQL Query /// /// sql /// ( /// select /// 'auth_users_exposed' as \"name!\", /// 'Exposed Auth Users' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.' as \"description!\", /// format( /// 'View/Materialized View \"%s\" in the public schema may expose \\`auth.users\\` data to anon or authenticated roles.', /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0002_auth_users_exposed' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'view', /// 'exposed_to', array_remove(array_agg(DISTINCT case when pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') then 'anon' when pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') then 'authenticated' end), null) /// ) as \"metadata!\", /// format('auth_users_exposed_%s_%s', n.nspname, c.relname) as \"cache_key!\" /// from /// -- Identify the oid for auth.users /// pg_catalog.pg_class auth_users_pg_class /// join pg_catalog.pg_namespace auth_users_pg_namespace /// on auth_users_pg_class.relnamespace = auth_users_pg_namespace.oid /// and auth_users_pg_class.relname = 'users' /// and auth_users_pg_namespace.nspname = 'auth' /// -- Depends on auth.users /// join pg_catalog.pg_depend d /// on d.refobjid = auth_users_pg_class.oid /// join pg_catalog.pg_rewrite r /// on r.oid = d.objid /// join pg_catalog.pg_class c /// on c.oid = r.ev_class /// join pg_catalog.pg_namespace n /// on n.oid = c.relnamespace /// join pg_catalog.pg_class pg_class_auth_users /// on d.refobjid = pg_class_auth_users.oid /// where /// d.deptype = 'n' /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', c.oid, 
'SELECT') /// ) /// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) /// -- Exclude self /// and c.relname \\<> '0002_auth_users_exposed' /// -- There are 3 insecure configurations /// and /// ( /// -- Materialized views don't support RLS so this is insecure by default /// (c.relkind in ('m')) -- m for materialized view /// or /// -- Standard View, accessible to anon or authenticated that is security_definer /// ( /// c.relkind = 'v' -- v for view /// -- Exclude security invoker views /// and not ( /// lower(coalesce(c.reloptions::text,'{}'))::text\\[] /// && array\\[ /// 'security_invoker=1', /// 'security_invoker=true', /// 'security_invoker=yes', /// 'security_invoker=on' /// ] /// ) /// ) /// or /// -- Standard View, security invoker, but no RLS enabled on auth.users /// ( /// c.relkind in ('v') -- v for view /// -- is security invoker /// and ( /// lower(coalesce(c.reloptions::text,'{}'))::text\\[] /// && array\\[ /// 'security_invoker=1', /// 'security_invoker=true', /// 'security_invoker=yes', /// 'security_invoker=on' /// ] /// ) /// and not pg_class_auth_users.relrowsecurity /// ) /// ) /// group by /// n.nspname, /// c.relname, /// c.oid) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"authUsersExposed\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0002_auth_users_exposed"] + #[doc = "Exposed Auth Users: Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security."] #[serde(skip_serializing_if = "Option::is_none")] pub auth_users_exposed: Option>, - #[doc = "/// # Extension in Public /// /// Detects extensions installed in the `public` schema. 
/// /// ## SQL Query /// /// sql /// ( /// select /// 'extension_in_public' as \"name!\", /// 'Extension in Public' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects extensions installed in the \\`public\\` schema.' as \"description!\", /// format( /// 'Extension \\`%s\\` is installed in the public schema. Move it to another schema.', /// pe.extname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0014_extension_in_public' as \"remediation!\", /// jsonb_build_object( /// 'schema', pe.extnamespace::regnamespace, /// 'name', pe.extname, /// 'type', 'extension' /// ) as \"metadata!\", /// format( /// 'extension_in_public_%s', /// pe.extname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_extension pe /// where /// -- plpgsql is installed by default in public and outside user control /// -- confirmed safe /// pe.extname not in ('plpgsql') /// -- Scoping this to public is not optimal. Ideally we would use the postgres /// -- search path. That currently isn't available via SQL. In other lints /// -- we have used has_schema_privilege('anon', 'extensions', 'USAGE') but that /// -- is not appropriate here as it would evaluate true for the extensions schema /// and pe.extnamespace::regnamespace::text = 'public') /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"extensionInPublic\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0014_extension_in_public"] + #[doc = "Extension in Public: Detects extensions installed in the `public` schema."] #[serde(skip_serializing_if = "Option::is_none")] pub extension_in_public: Option>, - #[doc = "/// # Extension Versions Outdated /// /// Detects extensions that are not using the default (recommended) version. 
/// /// ## SQL Query /// /// sql /// ( /// select /// 'extension_versions_outdated' as \"name!\", /// 'Extension Versions Outdated' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects extensions that are not using the default (recommended) version.' as \"description!\", /// format( /// 'Extension `%s` is using version `%s` but version `%s` is available. Using outdated extension versions may expose the database to security vulnerabilities.', /// ext.name, /// ext.installed_version, /// ext.default_version /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0022_extension_versions_outdated' as \"remediation!\", /// jsonb_build_object( /// 'extension_name', ext.name, /// 'installed_version', ext.installed_version, /// 'default_version', ext.default_version /// ) as \"metadata!\", /// format( /// 'extension_versions_outdated_%s_%s', /// ext.name, /// ext.installed_version /// ) as \"cache_key!\" /// from /// pg_catalog.pg_available_extensions ext /// join /// -- ignore versions not in pg_available_extension_versions /// -- e.g. 
residue of pg_upgrade /// pg_catalog.pg_available_extension_versions extv /// on extv.name = ext.name and extv.installed /// where /// ext.installed_version is not null /// and ext.default_version is not null /// and ext.installed_version != ext.default_version /// order by /// ext.name) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"extensionVersionsOutdated\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0022_extension_versions_outdated"] + #[doc = "Extension Versions Outdated: Detects extensions that are not using the default (recommended) version."] #[serde(skip_serializing_if = "Option::is_none")] pub extension_versions_outdated: Option>, - #[doc = "/// # Foreign Key to Auth Unique Constraint /// /// Detects user defined foreign keys to unique constraints in the auth schema. /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'fkey_to_auth_unique' as \"name!\", /// 'Foreign Key to Auth Unique Constraint' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects user defined foreign keys to unique constraints in the auth schema.' as \"description!\", /// format( /// 'Table `%s`.`%s` has a foreign key `%s` referencing an auth unique constraint', /// n.nspname, -- referencing schema /// c_rel.relname, -- referencing table /// c.conname -- fkey name /// ) as \"detail!\", /// 'Drop the foreign key constraint that references the auth schema.' 
as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c_rel.relname, /// 'foreign_key', c.conname /// ) as \"metadata!\", /// format( /// 'fkey_to_auth_unique_%s_%s_%s', /// n.nspname, -- referencing schema /// c_rel.relname, -- referencing table /// c.conname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_constraint c /// join pg_catalog.pg_class c_rel /// on c.conrelid = c_rel.oid /// join pg_catalog.pg_namespace n /// on c_rel.relnamespace = n.oid /// join pg_catalog.pg_class ref_rel /// on c.confrelid = ref_rel.oid /// join pg_catalog.pg_namespace cn /// on ref_rel.relnamespace = cn.oid /// join pg_catalog.pg_index i /// on c.conindid = i.indexrelid /// where c.contype = 'f' /// and cn.nspname = 'auth' /// and i.indisunique /// and not i.indisprimary) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"fkeyToAuthUnique\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: "] + #[doc = "Foreign Key to Auth Unique Constraint: Detects user defined foreign keys to unique constraints in the auth schema."] #[serde(skip_serializing_if = "Option::is_none")] pub fkey_to_auth_unique: Option>, - #[doc = "/// # Foreign Table in API /// /// Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies. /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'foreign_table_in_api' as \"name!\", /// 'Foreign Table in API' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.' 
as \"description!\", /// format( /// 'Foreign table \\`%s.%s\\` is accessible over APIs', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0017_foreign_table_in_api' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'foreign table' /// ) as \"metadata!\", /// format( /// 'foreign_table_in_api_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// join pg_catalog.pg_namespace n /// on n.oid = c.relnamespace /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind = 'f' /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') /// ) /// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"foreignTableInApi\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0017_foreign_table_in_api"] + #[doc = "Foreign Table in API: Detects foreign tables that are accessible over APIs. 
Foreign tables do not respect row level security policies."] #[serde(skip_serializing_if = "Option::is_none")] pub foreign_table_in_api: Option>, - #[doc = "/// # Function Search Path Mutable /// /// Detects functions where the search_path parameter is not set. /// /// ## SQL Query /// /// sql /// ( /// select /// 'function_search_path_mutable' as \"name!\", /// 'Function Search Path Mutable' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects functions where the search_path parameter is not set.' as \"description!\", /// format( /// 'Function \\`%s.%s\\` has a role mutable search_path', /// n.nspname, /// p.proname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0011_function_search_path_mutable' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', p.proname, /// 'type', 'function' /// ) as \"metadata!\", /// format( /// 'function_search_path_mutable_%s_%s_%s', /// n.nspname, /// p.proname, /// md5(p.prosrc) -- required when function is polymorphic /// ) as \"cache_key!\" /// from /// pg_catalog.pg_proc p /// join pg_catalog.pg_namespace n /// on p.pronamespace = n.oid /// left join pg_catalog.pg_depend dep /// on p.oid = dep.objid /// and dep.deptype = 'e' /// where /// n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null -- exclude functions owned by extensions /// -- Search path not set /// and not exists ( /// select 1 /// from unnest(coalesce(p.proconfig, '{}')) as config /// where config like 'search_path=%' /// )) /// /// /// ## 
Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"functionSearchPathMutable\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0011_function_search_path_mutable"] + #[doc = "Function Search Path Mutable: Detects functions where the search_path parameter is not set."] #[serde(skip_serializing_if = "Option::is_none")] pub function_search_path_mutable: Option>, - #[doc = "/// # Insecure Queue Exposed in API /// /// Detects cases where an insecure Queue is exposed over Data APIs /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'insecure_queue_exposed_in_api' as \"name!\", /// 'Insecure Queue Exposed in API' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects cases where an insecure Queue is exposed over Data APIs' as \"description!\", /// format( /// 'Table \\`%s.%s\\` is public, but RLS has not been enabled.', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0019_insecure_queue_exposed_in_api' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'rls_disabled_in_public_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// where /// c.relkind in ('r', 'I') -- regular or partitioned tables /// and not c.relrowsecurity -- RLS is disabled /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', 
c.oid, 'SELECT') /// ) /// and n.nspname = 'pgmq' -- tables in the pgmq schema /// and c.relname like 'q_%' -- only queue tables /// -- Constant requirements /// and 'pgmq_public' = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"insecureQueueExposedInApi\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0019_insecure_queue_exposed_in_api"] + #[doc = "Insecure Queue Exposed in API: Detects cases where an insecure Queue is exposed over Data APIs"] #[serde(skip_serializing_if = "Option::is_none")] pub insecure_queue_exposed_in_api: Option>, - #[doc = "/// # Materialized View in API /// /// Detects materialized views that are accessible over the Data APIs. /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'materialized_view_in_api' as \"name!\", /// 'Materialized View in API' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects materialized views that are accessible over the Data APIs.' 
as \"description!\", /// format( /// 'Materialized view \\`%s.%s\\` is selectable by anon or authenticated roles', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0016_materialized_view_in_api' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'materialized view' /// ) as \"metadata!\", /// format( /// 'materialized_view_in_api_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// join pg_catalog.pg_namespace n /// on n.oid = c.relnamespace /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind = 'm' /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') /// ) /// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"materializedViewInApi\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0016_materialized_view_in_api"] + #[doc = "Materialized View in API: Detects materialized views that are accessible over the Data APIs."] #[serde(skip_serializing_if = "Option::is_none")] pub materialized_view_in_api: 
Option>, - #[doc = "/// # Policy Exists RLS Disabled /// /// Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table. /// /// ## SQL Query /// /// sql /// ( /// select /// 'policy_exists_rls_disabled' as \"name!\", /// 'Policy Exists RLS Disabled' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.' as \"description!\", /// format( /// 'Table \\`%s.%s\\` has RLS policies but RLS is not enabled on the table. Policies include %s.', /// n.nspname, /// c.relname, /// array_agg(p.polname order by p.polname) /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0007_policy_exists_rls_disabled' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'policy_exists_rls_disabled_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_policy p /// join pg_catalog.pg_class c /// on p.polrelid = c.oid /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind = 'r' -- regular tables /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// -- RLS is disabled /// and not c.relrowsecurity /// and dep.objid is null -- exclude tables owned by extensions /// group by /// 
n.nspname, /// c.relname) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"policyExistsRlsDisabled\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0007_policy_exists_rls_disabled"] + #[doc = "Policy Exists RLS Disabled: Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table."] #[serde(skip_serializing_if = "Option::is_none")] pub policy_exists_rls_disabled: Option>, - #[doc = "/// # RLS Disabled in Public /// /// Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'rls_disabled_in_public' as \"name!\", /// 'RLS Disabled in Public' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST' as \"description!\", /// format( /// 'Table \\`%s.%s\\` is public, but RLS has not been enabled.', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0013_rls_disabled_in_public' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'rls_disabled_in_public_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// where /// c.relkind = 'r' -- regular tables /// -- RLS is disabled /// and not 
c.relrowsecurity /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') /// ) /// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// )) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"rlsDisabledInPublic\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0013_rls_disabled_in_public"] + #[doc = "RLS Disabled in Public: Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST"] #[serde(skip_serializing_if = "Option::is_none")] pub rls_disabled_in_public: Option>, - #[doc = "/// # RLS Enabled No Policy /// /// Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created. /// /// ## SQL Query /// /// sql /// ( /// select /// 'rls_enabled_no_policy' as \"name!\", /// 'RLS Enabled No Policy' as \"title!\", /// 'INFO' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.' 
as \"description!\", /// format( /// 'Table \\`%s.%s\\` has RLS enabled, but no policies exist', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0008_rls_enabled_no_policy' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'rls_enabled_no_policy_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// left join pg_catalog.pg_policy p /// on p.polrelid = c.oid /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind = 'r' -- regular tables /// and n.nspname not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// -- RLS is enabled /// and c.relrowsecurity /// and p.polname is null /// and dep.objid is null -- exclude tables owned by extensions /// group by /// n.nspname, /// c.relname) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"rlsEnabledNoPolicy\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0008_rls_enabled_no_policy"] + #[doc = "RLS Enabled No Policy: Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created."] #[serde(skip_serializing_if = "Option::is_none")] pub rls_enabled_no_policy: Option>, - #[doc = "/// # RLS 
references user metadata /// /// Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy. /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// with policies as ( /// select /// nsp.nspname as schema_name, /// pb.tablename as table_name, /// polname as policy_name, /// qual, /// with_check /// from /// pg_catalog.pg_policy pa /// join pg_catalog.pg_class pc /// on pa.polrelid = pc.oid /// join pg_catalog.pg_namespace nsp /// on pc.relnamespace = nsp.oid /// join pg_catalog.pg_policies pb /// on pc.relname = pb.tablename /// and nsp.nspname = pb.schemaname /// and pa.polname = pb.policyname /// ) /// select /// 'rls_references_user_metadata' as \"name!\", /// 'RLS references user metadata' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.' as \"description!\", /// format( /// 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that references Supabase Auth \\`user_metadata\\`. 
\\`user_metadata\\` is editable by end users and should never be used in a security context.', /// schema_name, /// table_name, /// policy_name /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0015_rls_references_user_metadata' as \"remediation!\", /// jsonb_build_object( /// 'schema', schema_name, /// 'name', table_name, /// 'type', 'table' /// ) as \"metadata!\", /// format('rls_references_user_metadata_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\" /// from /// policies /// where /// schema_name not in ( /// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and ( /// -- Example: auth.jwt() -> 'user_metadata' /// -- False positives are possible, but it isn't practical to string match /// -- If false positive rate is too high, this expression can iterate /// qual like '%auth.jwt()%user_metadata%' /// or qual like '%current_setting(%request.jwt.claims%)%user_metadata%' /// or with_check like '%auth.jwt()%user_metadata%' /// or with_check like '%current_setting(%request.jwt.claims%)%user_metadata%' /// )) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"rlsReferencesUserMetadata\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0015_rls_references_user_metadata"] + #[doc = "RLS references user metadata: Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy."] #[serde(skip_serializing_if = "Option::is_none")] pub 
rls_references_user_metadata: Option>, - #[doc = "/// # Security Definer View /// /// Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user /// /// Note: This rule requires Supabase roles (anon, authenticated, service_role). /// It will be automatically skipped if these roles don't exist in your database. /// /// ## SQL Query /// /// sql /// ( /// select /// 'security_definer_view' as \"name!\", /// 'Security Definer View' as \"title!\", /// 'ERROR' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user' as \"description!\", /// format( /// 'View \\`%s.%s\\` is defined with the SECURITY DEFINER property', /// n.nspname, /// c.relname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=0010_security_definer_view' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'type', 'view' /// ) as \"metadata!\", /// format( /// 'security_definer_view_%s_%s', /// n.nspname, /// c.relname /// ) as \"cache_key!\" /// from /// pg_catalog.pg_class c /// join pg_catalog.pg_namespace n /// on n.oid = c.relnamespace /// left join pg_catalog.pg_depend dep /// on c.oid = dep.objid /// and dep.deptype = 'e' /// where /// c.relkind = 'v' /// and ( /// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') /// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') /// ) /// and substring(pg_catalog.version() from 'PostgreSQL (\\[0-9]+)') >= '15' -- security invoker was added in pg15 /// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))) /// and n.nspname not in ( 
/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault' /// ) /// and dep.objid is null -- exclude views owned by extensions /// and not ( /// lower(coalesce(c.reloptions::text,'{}'))::text\\[] /// && array\\[ /// 'security_invoker=1', /// 'security_invoker=true', /// 'security_invoker=yes', /// 'security_invoker=on' /// ] /// )) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"securityDefinerView\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=0010_security_definer_view"] + #[doc = "Security Definer View: Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user"] #[serde(skip_serializing_if = "Option::is_none")] pub security_definer_view: Option>, - #[doc = "/// # Unsupported reg types /// /// Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade. /// /// ## SQL Query /// /// sql /// ( /// select /// 'unsupported_reg_types' as \"name!\", /// 'Unsupported reg types' as \"title!\", /// 'WARN' as \"level!\", /// 'EXTERNAL' as \"facing!\", /// array\\['SECURITY'] as \"categories!\", /// 'Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.' 
as \"description!\", /// format( /// 'Table \\`%s.%s\\` has a column \\`%s\\` with unsupported reg* type \\`%s\\`.', /// n.nspname, /// c.relname, /// a.attname, /// t.typname /// ) as \"detail!\", /// 'https://supabase.com/docs/guides/database/database-linter?lint=unsupported_reg_types' as \"remediation!\", /// jsonb_build_object( /// 'schema', n.nspname, /// 'name', c.relname, /// 'column', a.attname, /// 'type', 'table' /// ) as \"metadata!\", /// format( /// 'unsupported_reg_types_%s_%s_%s', /// n.nspname, /// c.relname, /// a.attname /// ) AS cache_key /// from /// pg_catalog.pg_attribute a /// join pg_catalog.pg_class c /// on a.attrelid = c.oid /// join pg_catalog.pg_namespace n /// on c.relnamespace = n.oid /// join pg_catalog.pg_type t /// on a.atttypid = t.oid /// join pg_catalog.pg_namespace tn /// on t.typnamespace = tn.oid /// where /// tn.nspname = 'pg_catalog' /// and t.typname in ('regcollation', 'regconfig', 'regdictionary', 'regnamespace', 'regoper', 'regoperator', 'regproc', 'regprocedure') /// and n.nspname not in ('pg_catalog', 'information_schema', 'pgsodium')) /// /// /// ## Configuration /// /// Enable or disable this rule in your configuration: /// /// json /// { /// \"splinter\": { /// \"rules\": { /// \"security\": { /// \"unsupportedRegTypes\": \"warn\" /// } /// } /// } /// } /// /// /// ## Remediation /// /// See: https://supabase.com/docs/guides/database/database-linter?lint=unsupported_reg_types"] + #[doc = "Unsupported reg types: Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade."] #[serde(skip_serializing_if = "Option::is_none")] pub unsupported_reg_types: Option>, } @@ -457,7 +466,22 @@ impl Security { "securityDefinerView", "unsupportedRegTypes", ]; - const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), 
+ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[12]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[13]), + ]; const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), diff --git a/crates/pgls_diagnostics_categories/src/categories.rs b/crates/pgls_diagnostics_categories/src/categories.rs index eb9e323ac..2b27f997f 100644 --- a/crates/pgls_diagnostics_categories/src/categories.rs +++ b/crates/pgls_diagnostics_categories/src/categories.rs @@ -47,6 +47,35 @@ define_categories! 
{ "lint/safety/runningStatementWhileHoldingAccessExclusive": "https://pg-language-server.com/latest/reference/rules/running-statement-while-holding-access-exclusive/", "lint/safety/transactionNesting": "https://pg-language-server.com/latest/reference/rules/transaction-nesting/", // end lint rules + // pglinter rules start + // Meta diagnostics + "pglinter/extensionNotInstalled": "Install the pglinter extension with: CREATE EXTENSION pglinter", + "pglinter/ruleDisabledInExtension": "Enable the rule in the extension with: UPDATE pglinter.rules SET enable = true WHERE code = ''", + // Base rules (B-series) + "pglinter/base/compositePrimaryKeyTooManyColumns": "https://github.com/pmpetit/pglinter#b012", + "pglinter/base/howManyObjectsWithUppercase": "https://github.com/pmpetit/pglinter#b005", + "pglinter/base/howManyRedudantIndex": "https://github.com/pmpetit/pglinter#b002", + "pglinter/base/howManyTableWithoutIndexOnFk": "https://github.com/pmpetit/pglinter#b003", + "pglinter/base/howManyTableWithoutPrimaryKey": "https://github.com/pmpetit/pglinter#b001", + "pglinter/base/howManyTablesNeverSelected": "https://github.com/pmpetit/pglinter#b006", + "pglinter/base/howManyTablesWithFkMismatch": "https://github.com/pmpetit/pglinter#b008", + "pglinter/base/howManyTablesWithFkOutsideSchema": "https://github.com/pmpetit/pglinter#b007", + "pglinter/base/howManyTablesWithReservedKeywords": "https://github.com/pmpetit/pglinter#b010", + "pglinter/base/howManyTablesWithSameTrigger": "https://github.com/pmpetit/pglinter#b009", + "pglinter/base/howManyUnusedIndex": "https://github.com/pmpetit/pglinter#b004", + "pglinter/base/severalTableOwnerInSchema": "https://github.com/pmpetit/pglinter#b011", + // Cluster rules (C-series) + "pglinter/cluster/passwordEncryptionIsMd5": "https://github.com/pmpetit/pglinter#c003", + "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists": "https://github.com/pmpetit/pglinter#c002", + 
"pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists": "https://github.com/pmpetit/pglinter#c001", + // Schema rules (S-series) + "pglinter/schema/ownerSchemaIsInternalRole": "https://github.com/pmpetit/pglinter#s004", + "pglinter/schema/schemaOwnerDoNotMatchTableOwner": "https://github.com/pmpetit/pglinter#s005", + "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt": "https://github.com/pmpetit/pglinter#s002", + "pglinter/schema/schemaWithDefaultRoleNotGranted": "https://github.com/pmpetit/pglinter#s001", + "pglinter/schema/unsecuredPublicSchema": "https://github.com/pmpetit/pglinter#s003", + // pglinter rules end + // splinter rules start "splinter/performance/authRlsInitplan": "https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan", "splinter/performance/duplicateIndex": "https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index", @@ -98,4 +127,11 @@ define_categories! { "splinter/performance", "splinter/security", // Splinter groups end + + // Pglinter groups start + "pglinter", + "pglinter/base", + "pglinter/cluster", + "pglinter/schema", + // Pglinter groups end } diff --git a/crates/pgls_pglinter/Cargo.toml b/crates/pgls_pglinter/Cargo.toml new file mode 100644 index 000000000..345dd4119 --- /dev/null +++ b/crates/pgls_pglinter/Cargo.toml @@ -0,0 +1,32 @@ +[package] +authors.workspace = true +categories.workspace = true +description = "pglinter Postgres extension integration for database linting" +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "pgls_pglinter" +repository.workspace = true +version = "0.0.0" + +[dependencies] +pgls_analyse.workspace = true +pgls_diagnostics.workspace = true +pgls_diagnostics_categories.workspace = true +pgls_schema_cache.workspace = true +rustc-hash.workspace = true +serde.workspace = true +serde_json.workspace = true +sqlx.workspace = true + +[dev-dependencies] +insta.workspace = true 
+pgls_console.workspace = true
+pgls_test_utils.workspace = true
+
+[build-dependencies]
+ureq = "2.9"
+
+[lib]
+doctest = false
diff --git a/crates/pgls_pglinter/build.rs b/crates/pgls_pglinter/build.rs
new file mode 100644
index 000000000..ee178f8fb
--- /dev/null
+++ b/crates/pgls_pglinter/build.rs
@@ -0,0 +1,53 @@
+use std::fs;
+use std::path::Path;
+
+/// Git ref of pmpetit/pglinter to vendor.
+/// NOTE(review): this is a branch name, not a commit SHA, so the downloaded
+/// file is not reproducible — consider pinning to a specific commit.
+const EXPECTED_COMMIT: &str = "main";
+const REPO: &str = "pmpetit/pglinter";
+
+fn main() {
+    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
+    let vendor_dir = Path::new(&manifest_dir).join("vendor");
+    let sql_dir = vendor_dir.join("sql");
+    let sha_file = vendor_dir.join("COMMIT_SHA.txt");
+    let rules_file = sql_dir.join("rules.sql");
+
+    // Re-download when the vendored files are missing or the pinned ref changed.
+    let needs_download = if sha_file.exists() && rules_file.exists() {
+        let current_sha = fs::read_to_string(&sha_file).unwrap_or_default();
+        current_sha.trim() != EXPECTED_COMMIT
+    } else {
+        true
+    };
+
+    if needs_download {
+        println!("cargo:warning=Downloading pglinter vendor files...");
+
+        // Create directories
+        fs::create_dir_all(&sql_dir).expect("Failed to create vendor/sql directory");
+
+        // Download rules.sql using ureq (blocking HTTP client).
+        // `call()` errors on transport failures and non-2xx responses.
+        let url =
+            format!("https://raw.githubusercontent.com/{REPO}/{EXPECTED_COMMIT}/sql/rules.sql");
+
+        let response = ureq::get(&url)
+            .call()
+            .expect("Failed to download rules.sql");
+
+        let content = response.into_string().expect("Failed to read response");
+
+        // `fs::write` creates (or truncates) the file and writes the whole buffer.
+        fs::write(&rules_file, &content).expect("Failed to write rules.sql");
+
+        // Record the ref we vendored so the check above can detect changes.
+        fs::write(&sha_file, EXPECTED_COMMIT).expect("Failed to write COMMIT_SHA.txt");
+
+        println!("cargo:warning=Downloaded pglinter vendor files successfully");
+    }
+
+    // Re-run when the vendored state changes on disk.
+    println!("cargo:rerun-if-changed=vendor/COMMIT_SHA.txt");
+    println!("cargo:rerun-if-changed=vendor/sql/rules.sql");
+}
diff --git
a/crates/pgls_pglinter/src/cache.rs b/crates/pgls_pglinter/src/cache.rs
new file mode 100644
index 000000000..f8739e4d5
--- /dev/null
+++ b/crates/pgls_pglinter/src/cache.rs
@@ -0,0 +1,60 @@
+//! Pglinter extension cache for avoiding repeated database queries
+
+use pgls_schema_cache::SchemaCache;
+use rustc_hash::FxHashSet;
+use sqlx::PgPool;
+
+/// Cached pglinter extension state (loaded once, reused)
+#[derive(Debug, Clone, Default)]
+pub struct PglinterCache {
+    /// Whether the pglinter extension is installed
+    pub extension_installed: bool,
+    /// Rule codes that are disabled in the pglinter extension
+    pub disabled_rules: FxHashSet<String>,
+}
+
+/// True when the `pglinter` extension appears in the schema cache.
+fn is_extension_installed(schema_cache: &SchemaCache) -> bool {
+    schema_cache.extensions.iter().any(|e| e.name == "pglinter")
+}
+
+impl PglinterCache {
+    /// Load pglinter extension state from database using official API
+    pub async fn load(conn: &PgPool, schema_cache: &SchemaCache) -> Result<Self, sqlx::Error> {
+        let extension_installed = is_extension_installed(schema_cache);
+
+        if !extension_installed {
+            return Ok(Self {
+                extension_installed: false,
+                disabled_rules: FxHashSet::default(),
+            });
+        }
+
+        // Single query against pglinter.rules for the disabled rule codes.
+        let disabled_rules = get_disabled_rules(conn).await?;
+
+        Ok(Self {
+            extension_installed,
+            disabled_rules,
+        })
+    }
+
+    /// Create initial cache from schema cache only (disabled rules will need API call later)
+    pub fn from_schema_cache(schema_cache: &SchemaCache) -> Self {
+        Self {
+            extension_installed: is_extension_installed(schema_cache),
+            disabled_rules: FxHashSet::default(),
+        }
+    }
+}
+
+/// Get disabled rules by querying the pglinter.rules table.
+/// Uses the rules table directly since show_rules() only outputs to NOTICE.
+/// Filtering happens in SQL so only the disabled rule codes cross the wire.
+pub async fn get_disabled_rules(conn: &PgPool) -> Result<FxHashSet<String>, sqlx::Error> {
+    let codes: Vec<String> =
+        sqlx::query_scalar("SELECT code FROM pglinter.rules WHERE NOT enable")
+            .fetch_all(conn)
+            .await?;
+
+    Ok(codes.into_iter().collect())
+}
diff --git
a/crates/pgls_pglinter/src/diagnostics.rs b/crates/pgls_pglinter/src/diagnostics.rs
new file mode 100644
index 000000000..eb75026c6
--- /dev/null
+++ b/crates/pgls_pglinter/src/diagnostics.rs
@@ -0,0 +1,139 @@
+//! Pglinter diagnostic types
+
+use pgls_diagnostics::{
+    Advices, Category, DatabaseObjectOwned, Diagnostic, LogCategory, MessageAndDescription,
+    Severity, Visit,
+};
+use std::io;
+
+/// A specialized diagnostic for pglinter (database-level linting via pglinter extension).
+#[derive(Debug, Diagnostic, PartialEq)]
+pub struct PglinterDiagnostic {
+    #[category]
+    pub category: &'static Category,
+
+    #[location(database_object)]
+    pub db_object: Option<DatabaseObjectOwned>,
+
+    #[message]
+    #[description]
+    pub message: MessageAndDescription,
+
+    #[severity]
+    pub severity: Severity,
+
+    #[advice]
+    pub advices: PglinterAdvices,
+}
+
+/// Advices for pglinter diagnostics
+#[derive(Debug, PartialEq)]
+pub struct PglinterAdvices {
+    /// General description of what this rule detects
+    pub description: String,
+
+    /// Rule code (e.g., "B001", "S001", "C001")
+    pub rule_code: Option<String>,
+
+    /// Suggested fixes for the issue
+    pub fixes: Vec<String>,
+
+    /// List of affected database objects
+    pub object_list: Option<String>,
+}
+
+impl Advices for PglinterAdvices {
+    // Renders the advice sections in order: description, rule code,
+    // affected objects (one log line per input line), numbered fixes.
+    fn record(&self, visitor: &mut dyn Visit) -> io::Result<()> {
+        if !self.description.is_empty() {
+            visitor.record_log(LogCategory::None, &self.description)?;
+        }
+
+        if let Some(code) = &self.rule_code {
+            visitor.record_log(LogCategory::Info, &format!("Rule: {code}"))?;
+        }
+
+        if let Some(objects) = &self.object_list {
+            if !objects.is_empty() {
+                visitor.record_log(LogCategory::None, &"Affected objects:")?;
+                for line in objects.lines() {
+                    visitor.record_log(LogCategory::Info, &format!(" {line}"))?;
+                }
+            }
+        }
+
+        if !self.fixes.is_empty() {
+            visitor.record_log(LogCategory::None, &"How to fix:")?;
+            for (i, fix) in self.fixes.iter().enumerate() {
+                let num = i + 1;
+                visitor.record_log(LogCategory::Info, &format!(" {num}. {fix}"))?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl PglinterDiagnostic {
+    /// Create diagnostic for missing pglinter extension
+    pub fn extension_not_installed() -> PglinterDiagnostic {
+        PglinterDiagnostic {
+            category: pgls_diagnostics::category!("pglinter/extensionNotInstalled"),
+            db_object: None,
+            message: "The pglinter extension is not installed in the database. Install it with 'CREATE EXTENSION pglinter' or disable pglinter rules in your configuration.".into(),
+            severity: Severity::Error,
+            advices: PglinterAdvices {
+                description: "pglinter rules are enabled in your configuration but the extension is not installed.".to_string(),
+                rule_code: None,
+                fixes: vec!["Install the pglinter extension: CREATE EXTENSION pglinter".to_string()],
+                object_list: None,
+            },
+        }
+    }
+
+    /// Create diagnostic for rule disabled in pglinter extension
+    pub fn rule_disabled_in_extension(rule_code: &str) -> PglinterDiagnostic {
+        let description = format!(
+            "Rule {rule_code} is enabled in configuration but disabled in pglinter extension. Enable it with: SELECT pglinter.enable_rule('{rule_code}')"
+        );
+
+        PglinterDiagnostic {
+            category: pgls_diagnostics::category!("pglinter/ruleDisabledInExtension"),
+            db_object: None,
+            message: description.into(),
+            severity: Severity::Error,
+            advices: PglinterAdvices {
+                description: format!(
+                    "Rule {rule_code} is configured to run but is disabled in the pglinter extension."
+                ),
+                rule_code: Some(rule_code.to_string()),
+                fixes: vec![format!(
+                    "Enable the rule: SELECT pglinter.enable_rule('{rule_code}')"
+                )],
+                object_list: None,
+            },
+        }
+    }
+
+    /// Create diagnostic from rule code using known metadata
+    pub fn from_rule_code(rule_code: &str) -> Option<PglinterDiagnostic> {
+        let category = crate::registry::get_rule_category(rule_code)?;
+        let metadata = crate::registry::get_rule_metadata_by_code(rule_code)?;
+
+        let fixes: Vec<String> = metadata.fixes.iter().map(|s| s.to_string()).collect();
+
+        Some(PglinterDiagnostic {
+            category,
+            db_object: None,
+            message: metadata.description.into(),
+            severity: Severity::Warning,
+            advices: PglinterAdvices {
+                description: metadata.description.to_string(),
+                rule_code: Some(rule_code.to_string()),
+                fixes,
+                object_list: None,
+            },
+        })
+    }
+}
diff --git a/crates/pgls_pglinter/src/lib.rs b/crates/pgls_pglinter/src/lib.rs
new file mode 100644
index 000000000..870a29072
--- /dev/null
+++ b/crates/pgls_pglinter/src/lib.rs
@@ -0,0 +1,145 @@
+//!
pglinter Postgres extension integration for database linting
+
+mod cache;
+mod diagnostics;
+pub mod registry;
+pub mod rule;
+pub mod rules;
+
+use pgls_analyse::{AnalysisFilter, RegistryVisitor, RuleMeta};
+use pgls_schema_cache::SchemaCache;
+use sqlx::PgPool;
+
+pub use cache::PglinterCache;
+pub use diagnostics::{PglinterAdvices, PglinterDiagnostic};
+pub use rule::PglinterRule;
+
+/// Parameters for running pglinter
+#[derive(Debug)]
+pub struct PglinterParams<'a> {
+    pub conn: &'a PgPool,
+    pub schema_cache: &'a SchemaCache,
+}
+
+/// Visitor that collects enabled pglinter rules based on filter
+struct RuleCollector<'a> {
+    filter: &'a AnalysisFilter<'a>,
+    enabled_rules: Vec<String>,
+}
+
+impl<'a> RegistryVisitor for RuleCollector<'a> {
+    fn record_category<C: pgls_analyse::GroupCategory>(&mut self) {
+        if self.filter.match_category::<C>() {
+            C::record_groups(self);
+        }
+    }
+
+    fn record_group<G: pgls_analyse::RuleGroup>(&mut self) {
+        if self.filter.match_group::<G>() {
+            G::record_rules(self);
+        }
+    }
+
+    fn record_rule<R: RuleMeta>(&mut self) {
+        if self.filter.match_rule::<R>() {
+            // Map the camelCase rule name to its pglinter code (e.g. "B001").
+            if let Some(code) = registry::get_rule_code(R::METADATA.name) {
+                self.enabled_rules.push(code.to_string());
+            }
+        }
+    }
+}
+
+/// Collect the pglinter rule codes enabled by the analysis filter.
+fn collect_enabled_rules(filter: &AnalysisFilter<'_>) -> Vec<String> {
+    let mut collector = RuleCollector {
+        filter,
+        enabled_rules: Vec::new(),
+    };
+    registry::visit_registry(&mut collector);
+    collector.enabled_rules
+}
+
+/// Run pglinter rules against the database
+pub async fn run_pglinter(
+    params: PglinterParams<'_>,
+    filter: &AnalysisFilter<'_>,
+    cache: Option<&PglinterCache>,
+) -> Result<Vec<PglinterDiagnostic>, sqlx::Error> {
+    let mut results = vec![];
+
+    // Check extension installed
+    let extension_installed = cache.map(|c| c.extension_installed).unwrap_or_else(|| {
+        params
+            .schema_cache
+            .extensions
+            .iter()
+            .any(|e| e.name == "pglinter")
+    });
+
+    // Collect enabled rules from config
+    let enabled_rules = collect_enabled_rules(filter);
+
+    if !extension_installed {
+        if !enabled_rules.is_empty() {
+            results.push(PglinterDiagnostic::extension_not_installed());
+        }
+        return Ok(results);
+    }
+
+    if enabled_rules.is_empty() {
+        return Ok(results);
+    }
+
+    // Get disabled rules from extension
+    let disabled_in_extension = match cache {
+        Some(c) => c.disabled_rules.clone(),
+        None => cache::get_disabled_rules(params.conn).await?,
+    };
+
+    // Check for mismatches and collect runnable rules
+    let mut runnable_rules = Vec::new();
+    for rule_code in &enabled_rules {
+        if disabled_in_extension.contains(rule_code) {
+            results.push(PglinterDiagnostic::rule_disabled_in_extension(rule_code));
+        } else {
+            runnable_rules.push(rule_code.clone());
+        }
+    }
+
+    if runnable_rules.is_empty() {
+        return Ok(results);
+    }
+
+    // Execute each rule
+    for rule_code in &runnable_rules {
+        if let Some(diags) = execute_rule(params.conn, rule_code).await? {
+            results.extend(diags);
+        }
+    }
+
+    Ok(results)
+}
+
+/// Execute a single pglinter rule using pglinter.check(rule_code).
+/// Returns `Some` with diagnostics when the rule detected issues, `None` otherwise.
+async fn execute_rule(
+    conn: &PgPool,
+    rule_code: &str,
+) -> Result<Option<Vec<PglinterDiagnostic>>, sqlx::Error> {
+    let has_issues: bool = sqlx::query_scalar("SELECT pglinter.check($1)")
+        .bind(rule_code)
+        .fetch_one(conn)
+        .await?;
+
+    if !has_issues {
+        return Ok(None);
+    }
+
+    // Rule fired - create diagnostic from our known metadata
+    if let Some(diag) = PglinterDiagnostic::from_rule_code(rule_code) {
+        Ok(Some(vec![diag]))
+    } else {
+        Ok(None)
+    }
+}
diff --git a/crates/pgls_pglinter/src/registry.rs b/crates/pgls_pglinter/src/registry.rs
new file mode 100644
index 000000000..1ec8516b6
--- /dev/null
+++ b/crates/pgls_pglinter/src/registry.rs
@@ -0,0 +1,463 @@
+//!
Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use pgls_analyse::RegistryVisitor; +use pgls_diagnostics::Category; +#[doc = r" Metadata for a pglinter rule"] +#[derive(Debug, Clone, Copy)] +pub struct RuleMetadata { + #[doc = r#" Rule code (e.g., "B001")"#] + pub code: &'static str, + #[doc = r" Rule name in camelCase"] + pub name: &'static str, + #[doc = r" Rule scope (BASE, SCHEMA, CLUSTER)"] + pub scope: &'static str, + #[doc = r" Description of what the rule detects"] + pub description: &'static str, + #[doc = r" Suggested fixes"] + pub fixes: &'static [&'static str], +} +#[doc = r" Visit all pglinter rules using the visitor pattern"] +pub fn visit_registry(registry: &mut V) { + registry.record_category::(); +} +#[doc = r" Get the pglinter rule code from the camelCase name"] +pub fn get_rule_code(name: &str) -> Option<&'static str> { + match name { + "compositePrimaryKeyTooManyColumns" => Some("B012"), + "howManyObjectsWithUppercase" => Some("B005"), + "howManyRedudantIndex" => Some("B002"), + "howManyTableWithoutIndexOnFk" => Some("B003"), + "howManyTableWithoutPrimaryKey" => Some("B001"), + "howManyTablesNeverSelected" => Some("B006"), + "howManyTablesWithFkMismatch" => Some("B008"), + "howManyTablesWithFkOutsideSchema" => Some("B007"), + "howManyTablesWithReservedKeywords" => Some("B010"), + "howManyTablesWithSameTrigger" => Some("B009"), + "howManyUnusedIndex" => Some("B004"), + "ownerSchemaIsInternalRole" => Some("S004"), + "passwordEncryptionIsMd5" => Some("C003"), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Some("C002"), + "pgHbaEntriesWithMethodTrustShouldNotExists" => Some("C001"), + "schemaOwnerDoNotMatchTableOwner" => Some("S005"), + "schemaPrefixedOrSuffixedWithEnvt" => Some("S002"), + "schemaWithDefaultRoleNotGranted" => Some("S001"), + "severalTableOwnerInSchema" => Some("B011"), + "unsecuredPublicSchema" => Some("S003"), + _ => None, + } +} +#[doc 
= r" Get the diagnostic category for a rule code"] +pub fn get_rule_category(code: &str) -> Option<&'static Category> { + match code { + "B012" => Some(::pgls_diagnostics::category!( + "pglinter/base/compositePrimaryKeyTooManyColumns" + )), + "B005" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyObjectsWithUppercase" + )), + "B002" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyRedudantIndex" + )), + "B003" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTableWithoutIndexOnFk" + )), + "B001" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTableWithoutPrimaryKey" + )), + "B006" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesNeverSelected" + )), + "B008" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithFkMismatch" + )), + "B007" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithFkOutsideSchema" + )), + "B010" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithReservedKeywords" + )), + "B009" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithSameTrigger" + )), + "B004" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyUnusedIndex" + )), + "S004" => Some(::pgls_diagnostics::category!( + "pglinter/schema/ownerSchemaIsInternalRole" + )), + "C003" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/passwordEncryptionIsMd5" + )), + "C002" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + )), + "C001" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + )), + "S005" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + )), + "S002" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + )), + "S001" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaWithDefaultRoleNotGranted" + 
)), + "B011" => Some(::pgls_diagnostics::category!( + "pglinter/base/severalTableOwnerInSchema" + )), + "S003" => Some(::pgls_diagnostics::category!( + "pglinter/schema/unsecuredPublicSchema" + )), + _ => None, + } +} +#[doc = r" Get rule metadata by name (camelCase)"] +pub fn get_rule_metadata(name: &str) -> Option { + match name { + "compositePrimaryKeyTooManyColumns" => Some(RuleMetadata { + code: "B012", + name: "compositePrimaryKeyTooManyColumns", + scope: "BASE", + description: "Detect tables with composite primary keys involving more than 4 columns", + fixes: &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ], + }), + "howManyObjectsWithUppercase" => Some(RuleMetadata { + code: "B005", + name: "howManyObjectsWithUppercase", + scope: "BASE", + description: "Count number of objects with uppercase in name or in columns.", + fixes: &["Do not use uppercase for any database objects"], + }), + "howManyRedudantIndex" => Some(RuleMetadata { + code: "B002", + name: "howManyRedudantIndex", + scope: "BASE", + description: "Count number of redundant index vs nb index.", + fixes: &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ], + }), + "howManyTableWithoutIndexOnFk" => Some(RuleMetadata { + code: "B003", + name: "howManyTableWithoutIndexOnFk", + scope: "BASE", + description: "Count number of tables without index on foreign key.", + fixes: &["create a index on foreign key or change warning/error threshold"], + }), + "howManyTableWithoutPrimaryKey" => Some(RuleMetadata { + code: "B001", + name: "howManyTableWithoutPrimaryKey", + scope: "BASE", + description: "Count number of tables without primary key.", + fixes: &["create a primary key or change warning/error threshold"], + }), 
+ "howManyTablesNeverSelected" => Some(RuleMetadata { + code: "B006", + name: "howManyTablesNeverSelected", + scope: "BASE", + description: "Count number of table(s) that has never been selected.", + fixes: &[ + "Is it necessary to update/delete/insert rows in table(s) that are never selected ?", + ], + }), + "howManyTablesWithFkMismatch" => Some(RuleMetadata { + code: "B008", + name: "howManyTablesWithFkMismatch", + scope: "BASE", + description: "Count number of tables with foreign keys that do not match the key reference type.", + fixes: &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ], + }), + "howManyTablesWithFkOutsideSchema" => Some(RuleMetadata { + code: "B007", + name: "howManyTablesWithFkOutsideSchema", + scope: "BASE", + description: "Count number of tables with foreign keys outside their schema.", + fixes: &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ], + }), + "howManyTablesWithReservedKeywords" => Some(RuleMetadata { + code: "B010", + name: "howManyTablesWithReservedKeywords", + scope: "BASE", + description: "Count number of database objects using reserved keywords in their names.", + fixes: &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ], + }), + "howManyTablesWithSameTrigger" => Some(RuleMetadata { + code: "B009", + name: "howManyTablesWithSameTrigger", + scope: "BASE", + description: "Count number of tables using the same trigger vs nb table with their own triggers.", + fixes: &[ + "For more readability and other considerations use one trigger function per table.", + "Sharing the same trigger function add more complexity.", + ], + }), + "howManyUnusedIndex" => Some(RuleMetadata { + code: "B004", + name: "howManyUnusedIndex", + scope: "BASE", + description: "Count number of unused index vs nb index (base on 
pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + fixes: &["remove unused index or change warning/error threshold"], + }), + "ownerSchemaIsInternalRole" => Some(RuleMetadata { + code: "S004", + name: "ownerSchemaIsInternalRole", + scope: "SCHEMA", + description: "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", + fixes: &["change schema owner to a functional role"], + }), + "passwordEncryptionIsMd5" => Some(RuleMetadata { + code: "C003", + name: "passwordEncryptionIsMd5", + scope: "CLUSTER", + description: "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + fixes: &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ], + }), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Some(RuleMetadata { + code: "C002", + name: "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust or password method in pg_hba.conf"], + }), + "pgHbaEntriesWithMethodTrustShouldNotExists" => Some(RuleMetadata { + code: "C001", + name: "pgHbaEntriesWithMethodTrustShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust method in pg_hba.conf"], + }), + "schemaOwnerDoNotMatchTableOwner" => Some(RuleMetadata { + code: "S005", + name: "schemaOwnerDoNotMatchTableOwner", + scope: "SCHEMA", + description: "The schema owner and tables in the schema do not match.", + fixes: &["For maintenance facilities, schema and tables owners should be the same."], + }), + "schemaPrefixedOrSuffixedWithEnvt" => Some(RuleMetadata { + code: "S002", + name: "schemaPrefixedOrSuffixedWithEnvt", + scope: "SCHEMA", + description: "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", + fixes: &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ], + }), + "schemaWithDefaultRoleNotGranted" => Some(RuleMetadata { + code: "S001", + name: "schemaWithDefaultRoleNotGranted", + scope: "SCHEMA", + description: "The schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.", + fixes: &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ], + }), + "severalTableOwnerInSchema" => Some(RuleMetadata { + code: "B011", + name: "severalTableOwnerInSchema", + scope: "BASE", + description: "In a schema there are several tables owned by different owners.", + fixes: &["change table owners to the same functional role"], + }), + "unsecuredPublicSchema" => Some(RuleMetadata { + code: "S003", + name: "unsecuredPublicSchema", + scope: "SCHEMA", + description: "Only authorized users should be allowed to create objects.", + fixes: &["REVOKE CREATE ON SCHEMA FROM PUBLIC"], + }), + _ => None, + } +} +#[doc = r#" Get rule metadata by code (e.g., "B001", "S001", "C001")"#] +pub fn get_rule_metadata_by_code(code: &str) -> Option<RuleMetadata> { + match code { + "B012" => Some(RuleMetadata { + code: "B012", + name: "compositePrimaryKeyTooManyColumns", + scope: "BASE", + description: "Detect tables with composite primary keys involving more than 4 columns", + fixes: &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ], + }), + "B005" => Some(RuleMetadata { + code: "B005", + name: "howManyObjectsWithUppercase", + scope: "BASE", + description: "Count number of objects with uppercase in name or in columns.", + fixes: &["Do not use uppercase for any database objects"], + }), + "B002" => Some(RuleMetadata { + code: "B002", + name: "howManyRedudantIndex", + scope: "BASE", + description: "Count number of redundant index vs nb index.", + fixes: &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ], + }), + "B003" => Some(RuleMetadata { + code: "B003", + name: "howManyTableWithoutIndexOnFk", + scope: "BASE", + description: 
"Count number of tables without index on foreign key.", + fixes: &["create a index on foreign key or change warning/error threshold"], + }), + "B001" => Some(RuleMetadata { + code: "B001", + name: "howManyTableWithoutPrimaryKey", + scope: "BASE", + description: "Count number of tables without primary key.", + fixes: &["create a primary key or change warning/error threshold"], + }), + "B006" => Some(RuleMetadata { + code: "B006", + name: "howManyTablesNeverSelected", + scope: "BASE", + description: "Count number of table(s) that has never been selected.", + fixes: &[ + "Is it necessary to update/delete/insert rows in table(s) that are never selected ?", + ], + }), + "B008" => Some(RuleMetadata { + code: "B008", + name: "howManyTablesWithFkMismatch", + scope: "BASE", + description: "Count number of tables with foreign keys that do not match the key reference type.", + fixes: &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ], + }), + "B007" => Some(RuleMetadata { + code: "B007", + name: "howManyTablesWithFkOutsideSchema", + scope: "BASE", + description: "Count number of tables with foreign keys outside their schema.", + fixes: &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ], + }), + "B010" => Some(RuleMetadata { + code: "B010", + name: "howManyTablesWithReservedKeywords", + scope: "BASE", + description: "Count number of database objects using reserved keywords in their names.", + fixes: &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ], + }), + "B009" => Some(RuleMetadata { + code: "B009", + name: "howManyTablesWithSameTrigger", + scope: "BASE", + description: "Count number of tables using the same trigger vs nb table with their own triggers.", + fixes: &[ + "For more readability and other considerations use one trigger function per table.", + 
"Sharing the same trigger function add more complexity.", + ], + }), + "B004" => Some(RuleMetadata { + code: "B004", + name: "howManyUnusedIndex", + scope: "BASE", + description: "Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + fixes: &["remove unused index or change warning/error threshold"], + }), + "S004" => Some(RuleMetadata { + code: "S004", + name: "ownerSchemaIsInternalRole", + scope: "SCHEMA", + description: "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", + fixes: &["change schema owner to a functional role"], + }), + "C003" => Some(RuleMetadata { + code: "C003", + name: "passwordEncryptionIsMd5", + scope: "CLUSTER", + description: "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + fixes: &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ], + }), + "C002" => Some(RuleMetadata { + code: "C002", + name: "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust or password method in pg_hba.conf"], + }), + "C001" => Some(RuleMetadata { + code: "C001", + name: "pgHbaEntriesWithMethodTrustShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust method in pg_hba.conf"], + }), + "S005" => Some(RuleMetadata { + code: "S005", + name: "schemaOwnerDoNotMatchTableOwner", + scope: "SCHEMA", + description: "The schema owner and tables in the schema do not match.", + fixes: &["For maintenance facilities, schema and tables owners should be the same."], + }), + "S002" => Some(RuleMetadata { + code: "S002", + name: "schemaPrefixedOrSuffixedWithEnvt", + scope: "SCHEMA", + description: "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", + fixes: &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ], + }), + "S001" => Some(RuleMetadata { + code: "S001", + name: "schemaWithDefaultRoleNotGranted", + scope: "SCHEMA", + description: "The schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.", + fixes: &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ], + }), + "B011" => Some(RuleMetadata { + code: "B011", + name: "severalTableOwnerInSchema", + scope: "BASE", + description: "In a schema there are several tables owned by different owners.", + fixes: &["change table owners to the same functional role"], + }), + "S003" => Some(RuleMetadata { + code: "S003", + name: "unsecuredPublicSchema", + scope: "SCHEMA", + description: "Only authorized users should be allowed to create objects.", + fixes: &["REVOKE CREATE ON SCHEMA FROM PUBLIC"], + }), + _ => None, + } +} diff --git a/crates/pgls_pglinter/src/rule.rs b/crates/pgls_pglinter/src/rule.rs new file mode 100644 index 000000000..43941a770 --- /dev/null +++ b/crates/pgls_pglinter/src/rule.rs @@ -0,0 +1,21 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use pgls_analyse::RuleMeta; +#[doc = r" Trait for pglinter (database-level) rules"] +#[doc = r""] +#[doc = r" Pglinter rules are different from linter rules:"] +#[doc = r" - They execute SQL queries against the database via pglinter extension"] +#[doc = r" - They don't have AST-based execution"] +#[doc = r" - Rule logic is in the pglinter Postgres extension"] +#[doc = r" - Threshold configuration (warning/error levels) is handled by pglinter extension"] +pub trait PglinterRule: RuleMeta { + #[doc = r#" Rule code (e.g., "B001", "S001", "C001")"#] + const CODE: &'static str; + #[doc = r" Rule scope (BASE, SCHEMA, or CLUSTER)"] + const SCOPE: &'static str; + #[doc = r" Description of what the rule detects"] + const DESCRIPTION: &'static str; + #[doc = r" Suggested fixes for violations"] + const FIXES: &'static [&'static str]; +} diff --git a/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs b/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs 
new file mode 100644 index 000000000..7a04cf2be --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# CompositePrimaryKeyTooManyColumns (B012)\n\nDetect tables with composite primary keys involving more than 4 columns\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"compositePrimaryKeyTooManyColumns\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Consider redesigning the table to avoid composite primary keys with more than 4 columns\n- Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.\n\n## Documentation\n\nSee: "] pub CompositePrimaryKeyTooManyColumns { version : "1.0.0" , name : "compositePrimaryKeyTooManyColumns" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for CompositePrimaryKeyTooManyColumns { + const CODE: &'static str = "B012"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Detect tables with composite primary keys involving more than 4 columns"; + const FIXES: &'static [&'static str] = &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs b/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs new file mode 100644 index 
000000000..8210f0068 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# HowManyObjectsWithUppercase (B005)\n\nCount number of objects with uppercase in name or in columns.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyObjectsWithUppercase\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Do not use uppercase for any database objects\n\n## Documentation\n\nSee: "] pub HowManyObjectsWithUppercase { version : "1.0.0" , name : "howManyObjectsWithUppercase" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyObjectsWithUppercase { + const CODE: &'static str = "B005"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of objects with uppercase in name or in columns."; + const FIXES: &'static [&'static str] = &["Do not use uppercase for any database objects"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs b/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs new file mode 100644 index 000000000..82ce8de0b --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs @@ -0,0 +1,13 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyRedudantIndex (B002)\n\nCount number of redundant index vs nb index.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyRedudantIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyRedudantIndex { version : "1.0.0" , name : "howManyRedudantIndex" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyRedudantIndex { + const CODE: &'static str = "B002"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of redundant index vs nb index."; + const FIXES: &'static [&'static str] = &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs b/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs new file mode 100644 index 000000000..a737d529a --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTableWithoutIndexOnFk (B003)\n\nCount number of tables without index on foreign key.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTableWithoutIndexOnFk\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- create a index on foreign key or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyTableWithoutIndexOnFk { version : "1.0.0" , name : "howManyTableWithoutIndexOnFk" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTableWithoutIndexOnFk { + const CODE: &'static str = "B003"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of tables without index on foreign key."; + const FIXES: &'static [&'static str] = + &["create a index on foreign key or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs b/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs new file mode 100644 index 000000000..b708f7055 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTableWithoutPrimaryKey (B001)\n\nCount number of tables without primary key.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTableWithoutPrimaryKey\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- create a primary key or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyTableWithoutPrimaryKey { version : "1.0.0" , name : "howManyTableWithoutPrimaryKey" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTableWithoutPrimaryKey { + const CODE: &'static str = "B001"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of tables without primary key."; + const FIXES: &'static [&'static str] = + &["create a primary key or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs new file mode 100644 index 000000000..4721485d6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesNeverSelected (B006)\n\nCount number of table(s) that has never been selected.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesNeverSelected\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Is it necessary to update/delete/insert rows in table(s) that are never selected ?\n\n## Documentation\n\nSee: "] pub HowManyTablesNeverSelected { version : "1.0.0" , name : "howManyTablesNeverSelected" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesNeverSelected { + const CODE: &'static str = "B006"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of table(s) that has never been selected."; + const FIXES: &'static [&'static str] = + &["Is it necessary to update/delete/insert rows in table(s) that are never selected ?"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs new file mode 100644 index 000000000..3e887c1d4 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithFkMismatch (B008)\n\nCount number of tables with foreign keys that do not match the key reference type.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithFkMismatch\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Consider column type adjustments to ensure foreign key matches referenced key type\n- ask a dba\n\n## Documentation\n\nSee: "] pub HowManyTablesWithFkMismatch { version : "1.0.0" , name : "howManyTablesWithFkMismatch" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithFkMismatch { + const CODE: &'static str = "B008"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables with foreign keys that do not match the key reference type."; + const FIXES: &'static [&'static str] = &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs new file mode 100644 index 000000000..f25318891 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithFkOutsideSchema (B007)\n\nCount number of tables with foreign keys outside their schema.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithFkOutsideSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Consider restructuring schema design to keep related tables in same schema\n- ask a dba\n\n## Documentation\n\nSee: "] pub HowManyTablesWithFkOutsideSchema { version : "1.0.0" , name : "howManyTablesWithFkOutsideSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithFkOutsideSchema { + const CODE: &'static str = "B007"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables with foreign keys outside their schema."; + const FIXES: &'static [&'static str] = &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs new file mode 100644 index 000000000..7b7efda8f --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithReservedKeywords (B010)\n\nCount number of database objects using reserved keywords in their names.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithReservedKeywords\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Rename database objects to avoid using reserved keywords.\n- Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.\n\n## Documentation\n\nSee: "] pub HowManyTablesWithReservedKeywords { version : "1.0.0" , name : "howManyTablesWithReservedKeywords" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithReservedKeywords { + const CODE: &'static str = "B010"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of database objects using reserved keywords in their names."; + const FIXES: &'static [&'static str] = &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs new file mode 100644 index 000000000..70cef3a36 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithSameTrigger (B009)\n\nCount number of tables using the same trigger vs nb table with their own triggers.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithSameTrigger\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- For more readability and other considerations use one trigger function per table.\n- Sharing the same trigger function add more complexity.\n\n## Documentation\n\nSee: "] pub HowManyTablesWithSameTrigger { version : "1.0.0" , name : "howManyTablesWithSameTrigger" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithSameTrigger { + const CODE: &'static str = "B009"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables using the same trigger vs nb table with their own triggers."; + const FIXES: &'static [&'static str] = &[ + "For more readability and other considerations use one trigger function per table.", + "Sharing the same trigger function add more complexity.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs b/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs new file mode 100644 index 000000000..805fb5a16 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyUnusedIndex (B004)\n\nCount number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyUnusedIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- remove unused index or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyUnusedIndex { version : "1.0.0" , name : "howManyUnusedIndex" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyUnusedIndex { + const CODE: &'static str = "B004"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)"; + const FIXES: &'static [&'static str] = + &["remove unused index or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/mod.rs b/crates/pgls_pglinter/src/rules/base/mod.rs new file mode 100644 index 000000000..c51a6b08d --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/mod.rs @@ -0,0 +1,16 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod composite_primary_key_too_many_columns; +pub mod how_many_objects_with_uppercase; +pub mod how_many_redudant_index; +pub mod how_many_table_without_index_on_fk; +pub mod how_many_table_without_primary_key; +pub mod how_many_tables_never_selected; +pub mod how_many_tables_with_fk_mismatch; +pub mod how_many_tables_with_fk_outside_schema; +pub mod how_many_tables_with_reserved_keywords; +pub mod how_many_tables_with_same_trigger; +pub mod how_many_unused_index; +pub mod several_table_owner_in_schema; +::pgls_analyse::declare_lint_group! 
{ pub Base { name : "base" , rules : [self :: composite_primary_key_too_many_columns :: CompositePrimaryKeyTooManyColumns , self :: how_many_objects_with_uppercase :: HowManyObjectsWithUppercase , self :: how_many_redudant_index :: HowManyRedudantIndex , self :: how_many_table_without_index_on_fk :: HowManyTableWithoutIndexOnFk , self :: how_many_table_without_primary_key :: HowManyTableWithoutPrimaryKey , self :: how_many_tables_never_selected :: HowManyTablesNeverSelected , self :: how_many_tables_with_fk_mismatch :: HowManyTablesWithFkMismatch , self :: how_many_tables_with_fk_outside_schema :: HowManyTablesWithFkOutsideSchema , self :: how_many_tables_with_reserved_keywords :: HowManyTablesWithReservedKeywords , self :: how_many_tables_with_same_trigger :: HowManyTablesWithSameTrigger , self :: how_many_unused_index :: HowManyUnusedIndex , self :: several_table_owner_in_schema :: SeveralTableOwnerInSchema ,] } } diff --git a/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs b/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs new file mode 100644 index 000000000..d42899411 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# SeveralTableOwnerInSchema (B011)\n\nIn a schema there are several tables owned by different owners.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"severalTableOwnerInSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- change table owners to the same functional role\n\n## Documentation\n\nSee: "] pub SeveralTableOwnerInSchema { version : "1.0.0" , name : "severalTableOwnerInSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SeveralTableOwnerInSchema { + const CODE: &'static str = "B011"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "In a schema there are several tables owned by different owners."; + const FIXES: &'static [&'static str] = &["change table owners to the same functional role"]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/mod.rs b/crates/pgls_pglinter/src/rules/cluster/mod.rs new file mode 100644 index 000000000..7e948dd5c --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/mod.rs @@ -0,0 +1,7 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod password_encryption_is_md5; +pub mod pg_hba_entries_with_method_trust_or_password_should_not_exists; +pub mod pg_hba_entries_with_method_trust_should_not_exists; +::pgls_analyse::declare_lint_group! 
{ pub Cluster { name : "cluster" , rules : [self :: password_encryption_is_md5 :: PasswordEncryptionIsMd5 , self :: pg_hba_entries_with_method_trust_or_password_should_not_exists :: PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists , self :: pg_hba_entries_with_method_trust_should_not_exists :: PgHbaEntriesWithMethodTrustShouldNotExists ,] } } diff --git a/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs b/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs new file mode 100644 index 000000000..7dad19a17 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# PasswordEncryptionIsMd5 (C003)\n\nThis configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"passwordEncryptionIsMd5\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = \n- scram-sha-256\n- ). Warning, you will need to reset all passwords after this parameter is updated.\n\n## Documentation\n\nSee: "] pub PasswordEncryptionIsMd5 { version : "1.0.0" , name : "passwordEncryptionIsMd5" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PasswordEncryptionIsMd5 { + const CODE: &'static str = "C003"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. 
Warning, you will need to reset all passwords after this is changed to scram-sha-256."; + const FIXES: &'static [&'static str] = &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs new file mode 100644 index 000000000..8cbb7e5d6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002)\n\nThis configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change trust or password method in pg_hba.conf\n\n## Documentation\n\nSee: "] pub PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists { version : "1.0.0" , name : "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists { + const CODE: &'static str = "C002"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."; + const FIXES: &'static [&'static str] = &["change trust or password method in pg_hba.conf"]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs new file mode 100644 index 000000000..096ae3bbc --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# PgHbaEntriesWithMethodTrustShouldNotExists (C001)\n\nThis configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"pgHbaEntriesWithMethodTrustShouldNotExists\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change trust method in pg_hba.conf\n\n## Documentation\n\nSee: "] pub PgHbaEntriesWithMethodTrustShouldNotExists { version : "1.0.0" , name : "pgHbaEntriesWithMethodTrustShouldNotExists" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PgHbaEntriesWithMethodTrustShouldNotExists { + const CODE: &'static str = "C001"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."; + const FIXES: &'static [&'static str] = &["change trust method in pg_hba.conf"]; +} diff --git a/crates/pgls_pglinter/src/rules/mod.rs b/crates/pgls_pglinter/src/rules/mod.rs new file mode 100644 index 000000000..428719e86 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/mod.rs @@ -0,0 +1,7 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod base; +pub mod cluster; +pub mod schema; +::pgls_analyse::declare_category! 
{ pub PgLinter { kind : Lint , groups : [self :: base :: Base , self :: cluster :: Cluster , self :: schema :: Schema ,] } } diff --git a/crates/pgls_pglinter/src/rules/schema/mod.rs b/crates/pgls_pglinter/src/rules/schema/mod.rs new file mode 100644 index 000000000..772dfa25c --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/mod.rs @@ -0,0 +1,9 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod owner_schema_is_internal_role; +pub mod schema_owner_do_not_match_table_owner; +pub mod schema_prefixed_or_suffixed_with_envt; +pub mod schema_with_default_role_not_granted; +pub mod unsecured_public_schema; +::pgls_analyse::declare_lint_group! { pub Schema { name : "schema" , rules : [self :: owner_schema_is_internal_role :: OwnerSchemaIsInternalRole , self :: schema_owner_do_not_match_table_owner :: SchemaOwnerDoNotMatchTableOwner , self :: schema_prefixed_or_suffixed_with_envt :: SchemaPrefixedOrSuffixedWithEnvt , self :: schema_with_default_role_not_granted :: SchemaWithDefaultRoleNotGranted , self :: unsecured_public_schema :: UnsecuredPublicSchema ,] } } diff --git a/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs b/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs new file mode 100644 index 000000000..7abbc1d89 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# OwnerSchemaIsInternalRole (S004)\n\nOwner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"ownerSchemaIsInternalRole\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change schema owner to a functional role\n\n## Documentation\n\nSee: "] pub OwnerSchemaIsInternalRole { version : "1.0.0" , name : "ownerSchemaIsInternalRole" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for OwnerSchemaIsInternalRole { + const CODE: &'static str = "S004"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary)."; + const FIXES: &'static [&'static str] = &["change schema owner to a functional role"]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs b/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs new file mode 100644 index 000000000..8072bfa1d --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# SchemaOwnerDoNotMatchTableOwner (S005)\n\nThe schema owner and tables in the schema do not match.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaOwnerDoNotMatchTableOwner\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- For maintenance facilities, schema and tables owners should be the same.\n\n## Documentation\n\nSee: "] pub SchemaOwnerDoNotMatchTableOwner { version : "1.0.0" , name : "schemaOwnerDoNotMatchTableOwner" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaOwnerDoNotMatchTableOwner { + const CODE: &'static str = "S005"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema owner and tables in the schema do not match."; + const FIXES: &'static [&'static str] = + &["For maintenance facilities, schema and tables owners should be the same."]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs b/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs new file mode 100644 index 000000000..4c6ad66a2 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs @@ -0,0 +1,13 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# SchemaPrefixedOrSuffixedWithEnvt (S002)\n\nThe schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. 
It is possible, but it is never easy.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaPrefixedOrSuffixedWithEnvt\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 1%\n\n## Fixes\n\n- Keep the same schema name across environments. Prefer prefix or suffix the database name\n\n## Documentation\n\nSee: "] pub SchemaPrefixedOrSuffixedWithEnvt { version : "1.0.0" , name : "schemaPrefixedOrSuffixedWithEnvt" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaPrefixedOrSuffixedWithEnvt { + const CODE: &'static str = "S002"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy."; + const FIXES: &'static [&'static str] = &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs b/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs new file mode 100644 index 000000000..eb7c221a8 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs @@ -0,0 +1,14 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# SchemaWithDefaultRoleNotGranted (S001)\n\nThe schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaWithDefaultRoleNotGranted\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 1%\n\n## Fixes\n\n- add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user \n\n## Documentation\n\nSee: "] pub SchemaWithDefaultRoleNotGranted { version : "1.0.0" , name : "schemaWithDefaultRoleNotGranted" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaWithDefaultRoleNotGranted { + const CODE: &'static str = "S001"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it."; + const FIXES: &'static [&'static str] = &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs b/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs new file mode 100644 index 000000000..b71069ff6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# UnsecuredPublicSchema (S003)\n\nOnly authorized users should be allowed to create objects.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"unsecuredPublicSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- REVOKE CREATE ON SCHEMA FROM PUBLIC\n\n## Documentation\n\nSee: "] pub UnsecuredPublicSchema { version : "1.0.0" , name : "unsecuredPublicSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for UnsecuredPublicSchema { + const CODE: &'static str = "S003"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "Only authorized users should be allowed to create objects."; + const FIXES: &'static [&'static str] = &["REVOKE CREATE ON SCHEMA FROM PUBLIC"]; +} diff --git a/crates/pgls_pglinter/tests/diagnostics.rs b/crates/pgls_pglinter/tests/diagnostics.rs new file mode 100644 index 000000000..971678b8a --- /dev/null +++ b/crates/pgls_pglinter/tests/diagnostics.rs @@ -0,0 +1,316 @@ +//! Integration tests for pglinter diagnostics +//! +//! These tests configure pglinter thresholds to 0% so rules fire deterministically. +//! +//! Note: These tests require the pglinter extension to be installed, which is only +//! available on Linux (via Docker) and macOS. Windows CI does not have pglinter. 
+ +#![cfg(not(target_os = "windows"))] + +use pgls_analyse::AnalysisFilter; +use pgls_console::fmt::{Formatter, HTML}; +use pgls_diagnostics::{Diagnostic, LogCategory, Visit}; +use pgls_pglinter::{PglinterCache, PglinterParams, run_pglinter}; +use pgls_schema_cache::SchemaCache; +use sqlx::PgPool; +use std::fmt::Write; +use std::io; + +struct TestVisitor { + logs: Vec, +} + +impl TestVisitor { + fn new() -> Self { + Self { logs: Vec::new() } + } + + fn into_string(self) -> String { + self.logs.join("\n") + } +} + +impl Visit for TestVisitor { + fn record_log( + &mut self, + category: LogCategory, + text: &dyn pgls_console::fmt::Display, + ) -> io::Result<()> { + let prefix = match category { + LogCategory::None => "", + LogCategory::Info => "[Info] ", + LogCategory::Warn => "[Warn] ", + LogCategory::Error => "[Error] ", + }; + + let mut buffer = vec![]; + let mut writer = HTML::new(&mut buffer); + let mut formatter = Formatter::new(&mut writer); + text.fmt(&mut formatter)?; + + let text_str = String::from_utf8(buffer).unwrap(); + self.logs.push(format!("{prefix}{text_str}")); + Ok(()) + } +} + +/// Configure pglinter for deterministic testing: +/// - Set all thresholds to 0% warning, 1% error so any violation triggers +/// - Disable cluster-level rules that depend on pg_hba.conf +async fn configure_pglinter_for_tests(pool: &PgPool) { + // Set thresholds to 0% warning for deterministic behavior + let rules_to_configure = [ + "B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010", "B011", + "B012", "S001", "S002", "S003", "S004", "S005", + ]; + + for rule in rules_to_configure { + let _ = sqlx::query("SELECT pglinter.update_rule_levels($1, 0, 1)") + .bind(rule) + .execute(pool) + .await; + } + + // Disable cluster-level rules (depend on pg_hba.conf, not deterministic) + for rule in ["C001", "C002", "C003"] { + let _ = sqlx::query("SELECT pglinter.disable_rule($1)") + .bind(rule) + .execute(pool) + .await; + } +} + +struct TestSetup<'a> { + 
name: &'a str, + setup: &'a str, + test_db: &'a PgPool, + /// Only include rules matching these prefixes (e.g., ["B001", "B005"]) + /// Empty means include all non-cluster rules + rule_filter: Vec<&'a str>, +} + +impl TestSetup<'_> { + async fn test(self) { + // Create required extensions (pglinter may depend on plpgsql_check) + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS plpgsql_check") + .execute(self.test_db) + .await + .expect("plpgsql_check extension not available"); + + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") + .execute(self.test_db) + .await + .expect("pglinter extension not available"); + + configure_pglinter_for_tests(self.test_db).await; + + sqlx::raw_sql(self.setup) + .execute(self.test_db) + .await + .expect("Failed to setup test database"); + + let schema_cache = SchemaCache::load(self.test_db) + .await + .expect("Failed to load schema cache"); + + let cache = PglinterCache::load(self.test_db, &schema_cache) + .await + .expect("Failed to load pglinter cache"); + + let filter = AnalysisFilter::default(); + let diagnostics = run_pglinter( + PglinterParams { + conn: self.test_db, + schema_cache: &schema_cache, + }, + &filter, + Some(&cache), + ) + .await + .expect("Failed to run pglinter checks"); + + // Filter diagnostics + let filtered: Vec<_> = diagnostics + .iter() + .filter(|d| { + let category = d.category().map(|c| c.name()).unwrap_or(""); + // Exclude cluster-level rules + if category.contains("/cluster/") { + return false; + } + // Apply rule filter if specified + if !self.rule_filter.is_empty() { + let rule_code = d.advices.rule_code.as_deref().unwrap_or(""); + return self.rule_filter.contains(&rule_code); + } + true + }) + .collect(); + + // Sort by category for deterministic output + let mut sorted = filtered; + sorted.sort_by_key(|d| d.category().map(|c| c.name()).unwrap_or("unknown")); + + let content = if sorted.is_empty() { + String::from("No Diagnostics") + } else { + let mut result = String::new(); + + for (idx, 
diagnostic) in sorted.iter().enumerate() { + if idx > 0 { + writeln!(&mut result).unwrap(); + writeln!(&mut result, "---").unwrap(); + writeln!(&mut result).unwrap(); + } + + let category_name = diagnostic.category().map(|c| c.name()).unwrap_or("unknown"); + writeln!(&mut result, "Category: {category_name}").unwrap(); + writeln!(&mut result, "Severity: {:?}", diagnostic.severity()).unwrap(); + + let mut msg_content = vec![]; + let mut writer = HTML::new(&mut msg_content); + let mut formatter = Formatter::new(&mut writer); + diagnostic.message(&mut formatter).unwrap(); + writeln!( + &mut result, + "Message: {}", + String::from_utf8(msg_content).unwrap() + ) + .unwrap(); + + let mut visitor = TestVisitor::new(); + diagnostic.advices(&mut visitor).unwrap(); + let advice_text = visitor.into_string(); + if !advice_text.is_empty() { + writeln!(&mut result, "Advices:\n{advice_text}").unwrap(); + } + } + + result + }; + + insta::with_settings!({ + prepend_module_to_snapshot => false, + }, { + insta::assert_snapshot!(self.name, content); + }); + } +} + +/// Test that pglinter extension can be created +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn extension_check(test_db: PgPool) { + // Create required extensions (pglinter may depend on plpgsql_check) + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS plpgsql_check") + .execute(&test_db) + .await + .expect("plpgsql_check extension not available"); + + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") + .execute(&test_db) + .await + .expect("pglinter extension not available"); + + let schema_cache = SchemaCache::load(&test_db) + .await + .expect("Failed to load schema cache"); + + assert!( + schema_cache.extensions.iter().any(|e| e.name == "pglinter"), + "pglinter extension not found" + ); +} + +/// Test B001: Table without primary key +/// Note: pglinter checks ALL tables in the database globally, not just specific tables. +/// So this test verifies that B001 fires when any table lacks a primary key. 
+#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn table_without_primary_key(test_db: PgPool) { + TestSetup { + name: "table_without_primary_key", + setup: r#" + CREATE TABLE public.test_no_pk ( + name text, + value integer + ); + "#, + test_db: &test_db, + rule_filter: vec!["B001"], + } + .test() + .await; +} + +/// Test B005: Objects with uppercase names +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn objects_with_uppercase(test_db: PgPool) { + TestSetup { + name: "objects_with_uppercase", + setup: r#" + CREATE TABLE public."TestTable" ( + id serial PRIMARY KEY, + "UserName" text + ); + "#, + test_db: &test_db, + rule_filter: vec!["B005"], + } + .test() + .await; +} + +/// Test B003: Foreign key without index +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn fk_without_index(test_db: PgPool) { + TestSetup { + name: "fk_without_index", + setup: r#" + CREATE TABLE public.parent_table ( + id serial PRIMARY KEY, + name text + ); + + CREATE TABLE public.child_table ( + id serial PRIMARY KEY, + parent_id integer NOT NULL REFERENCES public.parent_table(id) + ); + "#, + test_db: &test_db, + rule_filter: vec!["B003"], + } + .test() + .await; +} + +/// Test multiple issues at once +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn multiple_issues(test_db: PgPool) { + TestSetup { + name: "multiple_issues", + setup: r#" + -- Table without primary key + CREATE TABLE public.no_pk ( + name text + ); + + -- Table with uppercase name + CREATE TABLE public."BadName" ( + id serial PRIMARY KEY + ); + + -- FK without index + CREATE TABLE public.ref_parent ( + id serial PRIMARY KEY + ); + + CREATE TABLE public.ref_child ( + id serial PRIMARY KEY, + parent_id integer REFERENCES public.ref_parent(id) + ); + "#, + test_db: &test_db, + rule_filter: vec!["B001", "B003", "B005"], + } + .test() + .await; +} diff --git a/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap 
b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap new file mode 100644 index 000000000..5f79a0dea --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyTableWithoutIndexOnFk +Severity: Warning +Message: Count number of tables without index on foreign key. +Advices: +Count number of tables without index on foreign key. +[Info] Rule: B003 +How to fix: +[Info] 1. create a index on foreign key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap new file mode 100644 index 000000000..15ed63c69 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap @@ -0,0 +1,35 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Count number of objects with uppercase in name or in columns. +Advices: +Count number of objects with uppercase in name or in columns. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyTableWithoutIndexOnFk +Severity: Warning +Message: Count number of tables without index on foreign key. +Advices: +Count number of tables without index on foreign key. +[Info] Rule: B003 +How to fix: +[Info] 1. create a index on foreign key or change warning/error threshold + +--- + +Category: pglinter/base/howManyTableWithoutPrimaryKey +Severity: Warning +Message: Count number of tables without primary key. +Advices: +Count number of tables without primary key. +[Info] Rule: B001 +How to fix: +[Info] 1. 
create a primary key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap new file mode 100644 index 000000000..3fbd674a6 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Count number of objects with uppercase in name or in columns. +Advices: +Count number of objects with uppercase in name or in columns. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects diff --git a/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap new file mode 100644 index 000000000..b6611adb5 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyTableWithoutPrimaryKey +Severity: Warning +Message: Count number of tables without primary key. +Advices: +Count number of tables without primary key. +[Info] Rule: B001 +How to fix: +[Info] 1. 
create a primary key or change warning/error threshold diff --git a/crates/pgls_pglinter/vendor/.gitkeep b/crates/pgls_pglinter/vendor/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/crates/pgls_pglinter/vendor/COMMIT_SHA.txt b/crates/pgls_pglinter/vendor/COMMIT_SHA.txt new file mode 100644 index 000000000..88d050b19 --- /dev/null +++ b/crates/pgls_pglinter/vendor/COMMIT_SHA.txt @@ -0,0 +1 @@ +main \ No newline at end of file diff --git a/crates/pgls_pglinter/vendor/sql/rules.sql b/crates/pgls_pglinter/vendor/sql/rules.sql new file mode 100644 index 000000000..be6d3ca8f --- /dev/null +++ b/crates/pgls_pglinter/vendor/sql/rules.sql @@ -0,0 +1,1615 @@ +-- ============================================================================= +-- pglinter Rules Configuration +-- ============================================================================= +-- +-- This file defines the comprehensive rule set for the pglinter PostgreSQL +-- extension. It creates the rules table that stores metadata for all +-- database analysis rules. +-- +-- Rule Categories: +-- B-series: Base Database Rules (tables, indexes, primary keys, etc.) +-- C-series: Cluster Rules (configuration, security, performance) +-- S-series: Schema Rules (permissions, ownership, security) +-- +-- Each rule includes: +-- - Rule code (e.g., B001, T003) +-- - Configurable warning/error thresholds +-- - Scope (BASE, CLUSTER, SCHEMA, TABLE) +-- - Descriptive metadata and fix suggestions +-- - SQL queries for analysis (q1/q2 fields) +-- +-- Usage: +-- This file is automatically executed during extension installation +-- via pgrx's extension_sql_file! macro. 
+-- +-- ============================================================================= + +CREATE TABLE IF NOT EXISTS pglinter.rules ( + id SERIAL PRIMARY KEY, + name TEXT, + code TEXT, + enable BOOL DEFAULT TRUE, + warning_level INT, + error_level INT, + scope TEXT, + description TEXT, + message TEXT, + fixes TEXT [], + q1 TEXT, + q2 TEXT, + q3 TEXT +); + +-- Clear existing data and insert comprehensive rules +DELETE FROM pglinter.rules; + + +INSERT INTO pglinter.rules ( + name, + code, + warning_level, + error_level, + scope, + description, + message, + fixes +) VALUES +-- Base Database Rules (B series) +( + 'HowManyTableWithoutPrimaryKey', 'B001', 1, 80, 'BASE', + 'Count number of tables without primary key.', + '{0}/{1} table(s) without primary key exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['create a primary key or change warning/error threshold'] +), +( + 'HowManyRedudantIndex', 'B002', 1, 80, 'BASE', + 'Count number of redundant index vs nb index.', + '{0}/{1} redundant(s) index exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold' + ] +), +( + 'HowManyTableWithoutIndexOnFk', 'B003', 1, 80, 'BASE', + 'Count number of tables without index on foreign key.', + '{0}/{1} table(s) without index on foreign key exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['create a index on foreign key or change warning/error threshold'] +), +( + 'HowManyUnusedIndex', 'B004', 20, 80, 'BASE', + 'Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)', + '{0}/{1} unused index exceed the {2} threshold: {3}%. 
Object list:\n{4}', + ARRAY['remove unused index or change warning/error threshold'] +), +( + 'HowManyObjectsWithUppercase', 'B005', 20, 80, 'BASE', + 'Count number of objects with uppercase in name or in columns.', + '{0}/{1} object(s) using uppercase for name or columns exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['Do not use uppercase for any database objects'] +), +( + 'HowManyTablesNeverSelected', 'B006', 1, 80, 'BASE', + 'Count number of table(s) that has never been selected.', + '{0}/{1} table(s) are never selected the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'Is it necessary to update/delete/insert rows in table(s) that are never selected ?' + ] +), +( + 'HowManyTablesWithFkOutsideSchema', 'B007', 20, 80, 'BASE', + 'Count number of tables with foreign keys outside their schema.', + '{0}/{1} table(s) with foreign keys outside schema exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'Consider restructuring schema design to keep related tables in same schema', + 'ask a dba' + ] +), +( + 'HowManyTablesWithFkMismatch', 'B008', 1, 80, 'BASE', + 'Count number of tables with foreign keys that do not match the key reference type.', + '{0}/{1} table(s) with foreign key mismatch exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'Consider column type adjustments to ensure foreign key matches referenced key type', + 'ask a dba' + ] +), +( + 'HowManyTablesWithSameTrigger', 'B009', 20, 80, 'BASE', + 'Count number of tables using the same trigger vs nb table with their own triggers.', + '{0}/{1} table(s) using the same trigger function exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'For more readability and other considerations use one trigger function per table.', + 'Sharing the same trigger function add more complexity.' 
+ ] +), +( + 'HowManyTablesWithReservedKeywords', 'B010', 20, 80, 'BASE', + 'Count number of database objects using reserved keywords in their names.', + '{0}/{1} object(s) using reserved keywords exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY[ + 'Rename database objects to avoid using reserved keywords.', + 'Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.' + ] +), +( + 'SeveralTableOwnerInSchema', 'B011', 1, 80, 'BASE', + 'In a schema there are several tables owned by different owners.', + '{0}/{1} schemas have tables owned by different owners. Exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['change table owners to the same functional role'] +), +( + 'CompositePrimaryKeyTooManyColumns', 'B012', 1, 80, 'BASE', + 'Detect tables with composite primary keys involving more than 4 columns', + '{0} table(s) have composite primary keys with more than 4 columns. Object list:\n{4}', + ARRAY[ + 'Consider redesigning the table to avoid composite primary keys with more than 4 columns', + 'Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.' + ] +), +( + 'SchemaWithDefaultRoleNotGranted', 'S001', 1, 1, 'SCHEMA', + 'The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it.', + 'No default role grantee on schema {0}.{1}. It means that each time a table is created, you must grant it to roles. Object list:\n{4}', + ARRAY[ + 'add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ' + ] +), + +( + 'SchemaPrefixedOrSuffixedWithEnvt', 'S002', 1, 1, 'SCHEMA', + 'The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. 
It is possible, but it is never easy.', + '{0}/{1} schemas are prefixed or suffixed with environment names. It exceed the {2} threshold: {3}%. Prefer prefix or suffix the database name instead. Object list:\n{4}', + ARRAY[ + 'Keep the same schema name across environments. Prefer prefix or suffix the database name' + ] +), +( + 'UnsecuredPublicSchema', 'S003', 1, 80, 'SCHEMA', + 'Only authorized users should be allowed to create objects.', + '{0}/{1} schemas are unsecured, schemas where all users can create objects in, exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['REVOKE CREATE ON SCHEMA FROM PUBLIC'] +), +( + 'OwnerSchemaIsInternalRole', 'S004', 20, 80, 'SCHEMA', + 'Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).', + '{0}/{1} schemas are owned by internal roles or superuser. Exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['change schema owner to a functional role'] +), +( + 'SchemaOwnerDoNotMatchTableOwner', 'S005', 20, 80, 'SCHEMA', + 'The schema owner and tables in the schema do not match.', + '{0}/{1} in the same schema, tables have different owners. They should be the same. Exceed the {2} threshold: {3}%. Object list:\n{4}', + ARRAY['For maintenance facilities, schema and tables owners should be the same.'] +), +( + 'PgHbaEntriesWithMethodTrustShouldNotExists', + 'C001', + 20, + 80, + 'CLUSTER', + 'This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.', + '{0} entries in pg_hba.conf with trust authentication method exceed the warning threshold: {1}.', + ARRAY['change trust method in pg_hba.conf'] +), +( + 'PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists', + 'C002', + 20, + 80, + 'CLUSTER', + 'This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.', + '{0} entries in pg_hba.conf with trust or password authentication method exceed the warning threshold: {1}.', + ARRAY['change trust or password method in pg_hba.conf'] +), +( + 'PasswordEncryptionIsMd5', + 'C003', + 20, + 80, + 'CLUSTER', + 'This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.', + 'Encrypted passwords with MD5.', + ARRAY['change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ''scram-sha-256'' ). Warning, you will need to reset all passwords after this parameter is updated.'] +); + + +-- ============================================================================= +-- RULE QUERY UPDATES - Auto-generated from individual SQL files +-- ============================================================================= +-- The following UPDATE statements populate the q1 and q2 columns +-- with SQL queries extracted from individual *q*.sql files. +-- These queries are used by the pglinter engine to execute rule checks. 
+-- ============================================================================= + +-- B001 - Tables Without Primary Key +UPDATE pglinter.rules +SET + q1 = $$ +SELECT count(*)::BIGINT AS total_tables +FROM pg_catalog.pg_tables +WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT +count(1)::BIGINT AS tables_without_primary_key +FROM + pg_class c +JOIN + pg_namespace n ON n.oid = c.relnamespace +LEFT JOIN + pg_index i ON i.indrelid = c.oid AND i.indisprimary +WHERE + n.nspname NOT IN ('pg_catalog', 'information_schema', 'gp_toolkit','_timescaledb', 'timescaledb') -- Exclude system schemas + AND c.relkind = 'r' -- Only include regular tables + AND i.indrelid IS NULL +$$, + q3 = $$ +SELECT pt.schemaname::text,pt.tablename::text +FROM pg_tables AS pt +WHERE + pt.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND NOT EXISTS ( + SELECT 1 + FROM pg_constraint AS pc + WHERE + pc.conrelid = ( + SELECT pg_class.oid + FROM pg_class + JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid + WHERE + pg_class.relname = pt.tablename + AND pg_namespace.nspname = pt.schemaname + ) + AND pc.contype = 'p' + ) +ORDER BY 1 +$$ +WHERE code = 'B001'; + + +-- ============================================================================= +-- B002 - Redundant Indexes (Total Index Count Query) +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT COUNT(*) AS total_indexes +FROM pg_indexes +WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT COUNT(*) AS redundant_indexes +FROM ( + SELECT DISTINCT i1.indexrelid + FROM pg_index i1, pg_index i2 + WHERE + i1.indrelid = i2.indrelid + AND i1.indexrelid != i2.indexrelid + AND i1.indkey = i2.indkey + AND EXISTS ( + 
SELECT 1 FROM pg_indexes pi1 + WHERE + pi1.indexname + = ( + SELECT relname FROM pg_class + WHERE oid = i1.indexrelid + ) + AND pi1.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + ) +) redundant +$$, + q3 = $$ +WITH index_info AS ( + -- This CTE gets the column info, plus the boolean flag for Primary Key (indisprimary). + SELECT + ind.indrelid AS table_oid, + ind.indexrelid AS index_oid, + att.attname AS column_name, + array_position(ind.indkey, att.attnum) AS column_order, + ind.indisprimary -- Added Primary Key flag + FROM pg_index ind + JOIN pg_attribute att ON att.attrelid = ind.indrelid AND att.attnum = ANY(ind.indkey) + WHERE NOT ind.indisexclusion +), +indexed_columns AS ( + -- Aggregates columns for each index and propagates PK flag. + SELECT + table_oid, + index_oid, + string_agg(column_name, ',' ORDER BY column_order) AS indexed_columns_string, + MAX(indisprimary::int)::bool AS is_primary_key + FROM index_info + GROUP BY table_oid, index_oid +), +table_info AS ( + -- Joins to pg_class and pg_namespace to get table names and schema names. 
+ SELECT + oid AS table_oid, + relname AS tablename, + relnamespace + FROM pg_class +) +SELECT + pg_namespace.nspname::TEXT AS schema_name, + table_info.tablename::TEXT AS table_name, + redundant_index.relname::TEXT ||'('|| i1.indexed_columns_string || ') is redundant with '|| superset_index.relname||'('|| i2.indexed_columns_string ||')' AS problematic_object +FROM indexed_columns AS i1 -- The smaller/redundant index +JOIN indexed_columns AS i2 ON i1.table_oid = i2.table_oid -- The larger/superset index +JOIN pg_class redundant_index ON i1.index_oid = redundant_index.oid +JOIN pg_class superset_index ON i2.index_oid = superset_index.oid +JOIN table_info ON i1.table_oid = table_info.table_oid +JOIN pg_namespace ON table_info.relnamespace = pg_namespace.oid +WHERE + pg_namespace.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND i1.index_oid <> i2.index_oid -- Ensure the indexes are not the same + -- Checks if the smaller index's column string is a prefix of the larger index's string. 
+ AND i2.indexed_columns_string LIKE i1.indexed_columns_string || '%' + +ORDER BY 1, 2 +$$ +WHERE code = 'B002'; + +-- ============================================================================= +-- B003 - Foreign Key without Index +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT count(DISTINCT tc.table_name)::BIGINT AS total_tables +FROM + information_schema.table_constraints AS tc +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT COUNT(DISTINCT c.relname)::INT AS tables_with_unindexed_foreign_keys +FROM pg_constraint con +JOIN pg_class c ON c.oid = con.conrelid +JOIN pg_namespace n ON n.oid = c.relnamespace +LEFT JOIN + pg_index i + ON i.indrelid = c.oid AND con.conkey::smallint [] <@ i.indkey::smallint [] +WHERE + con.contype = 'f' + AND c.relkind = 'r' + AND i.indexrelid IS NULL + AND n.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema', 'pglinter','_timescaledb', 'timescaledb') +$$, + q3 = $$ +SELECT DISTINCT + tc.table_schema::text, + tc.table_name::text, + tc.constraint_name::text AS problematic_object +FROM information_schema.table_constraints AS tc +INNER JOIN information_schema.key_column_usage AS kcu + ON + tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND NOT EXISTS ( + SELECT 1 FROM pg_indexes AS pi + WHERE + pi.schemaname = tc.table_schema + AND pi.tablename = tc.table_name + AND pi.indexdef LIKE '%' || kcu.column_name || '%' + ) +ORDER BY 1 +$$ +WHERE code = 'B003'; + +-- ============================================================================= +-- B004 - Manual Index Usage (Total) +-- 
============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT COUNT(*) AS total_manual_indexes +FROM pg_stat_user_indexes AS psu +JOIN pg_index AS pgi ON psu.indexrelid = pgi.indexrelid +WHERE + pgi.indisprimary = FALSE -- Excludes indexes created for a PRIMARY KEY + -- Excludes indexes created for a UNIQUE constraint + AND pgi.indisunique = FALSE + AND psu.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT COUNT(*) AS unused_manual_indexes +FROM pg_stat_user_indexes AS psu +JOIN pg_index AS pgi ON psu.indexrelid = pgi.indexrelid +WHERE + psu.idx_scan = 0 + AND pgi.indisprimary = FALSE -- Excludes indexes created for a PRIMARY KEY + -- Excludes indexes created for a UNIQUE constraint + AND pgi.indisunique = FALSE + AND psu.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +$$, + q3 = $$ +SELECT + schemaname::text, + relname::text || 'has' || + LEAST( + ROUND( + ( + seq_tup_read::numeric + / NULLIF((seq_tup_read + idx_tup_fetch)::numeric, 0) + ) * 100, 0 + ), + 100 + )::text ||' % of seq scan.' 
AS problematic_object +FROM pg_stat_user_tables +WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +ORDER BY 1, 2 +$$ +WHERE code = 'B004'; + + +-- ============================================================================= +-- B005 - Objects With Uppercase (Total) +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT COUNT(*) AS total_objects +FROM ( + -- All tables + SELECT + 'table' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.tables + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All columns + SELECT + 'column' AS object_type, + table_schema AS schema_name, + table_name || '.' || column_name AS object_name + FROM information_schema.columns + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All indexes + SELECT + 'index' AS object_type, + schemaname AS schema_name, + indexname AS object_name + FROM pg_indexes + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All sequences + SELECT + 'sequence' AS object_type, + sequence_schema AS schema_name, + sequence_name AS object_name + FROM information_schema.sequences + WHERE + sequence_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All views + SELECT + 'view' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.views + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All functions + SELECT + 'function' AS object_type, + 
routine_schema AS schema_name, + routine_name AS object_name + FROM information_schema.routines + WHERE + routine_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND routine_type = 'FUNCTION' + + UNION + + -- All triggers + SELECT + 'trigger' AS object_type, + trigger_schema AS schema_name, + trigger_name AS object_name + FROM information_schema.triggers + WHERE + trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + + UNION + + -- All schemas + SELECT + 'schema' AS object_type, + schema_name AS schema_name, + schema_name AS object_name + FROM information_schema.schemata + WHERE + schema_name NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +) all_objects +$$, + q2 = $$ +SELECT COUNT(*) AS uppercase_objects +FROM ( + -- Tables with uppercase names + SELECT + 'table' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.tables + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND table_name != LOWER(table_name) + + UNION + + -- Columns with uppercase names + SELECT + 'column' AS object_type, + table_schema AS schema_name, + table_name || '.' 
|| column_name AS object_name + FROM information_schema.columns + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND column_name != LOWER(column_name) + + UNION + + -- Indexes with uppercase names + SELECT + 'index' AS object_type, + schemaname AS schema_name, + indexname AS object_name + FROM pg_indexes + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND indexname != LOWER(indexname) + + UNION + + -- Sequences with uppercase names + SELECT + 'sequence' AS object_type, + sequence_schema AS schema_name, + sequence_name AS object_name + FROM information_schema.sequences + WHERE + sequence_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND sequence_name != LOWER(sequence_name) + + UNION + + -- Views with uppercase names + SELECT + 'view' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.views + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND table_name != LOWER(table_name) + + UNION + + -- Functions with uppercase names + SELECT + 'function' AS object_type, + routine_schema AS schema_name, + routine_name AS object_name + FROM information_schema.routines + WHERE + routine_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND routine_type = 'FUNCTION' + AND routine_name != LOWER(routine_name) + + UNION + + -- Triggers with uppercase names + SELECT + 'trigger' AS object_type, + trigger_schema AS schema_name, + trigger_name AS object_name + FROM information_schema.triggers + WHERE + trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND trigger_name != LOWER(trigger_name) + + UNION + + -- 
Schemas with uppercase names + SELECT + 'schema' AS object_type, + schema_name AS schema_name, + schema_name AS object_name + FROM information_schema.schemata + WHERE + schema_name NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND schema_name != LOWER(schema_name) +) uppercase_objects +$$, + q3 = $$ +SELECT + object_type::TEXT ||' '||schema_name::TEXT, + object_name::TEXT AS problematic_object +FROM ( + -- Tables with uppercase names + SELECT + 'table' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.tables + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND table_name != LOWER(table_name) + + UNION ALL + + -- Columns with uppercase names + SELECT + 'column' AS object_type, + table_schema AS schema_name, + table_name || '.' || column_name AS object_name + FROM information_schema.columns + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND column_name != LOWER(column_name) + + UNION ALL + + -- Indexes with uppercase names + SELECT + 'index' AS object_type, + schemaname AS schema_name, + indexname AS object_name + FROM pg_indexes + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND indexname != LOWER(indexname) + + UNION ALL + + -- Sequences with uppercase names + SELECT + 'sequence' AS object_type, + sequence_schema AS schema_name, + sequence_name AS object_name + FROM information_schema.sequences + WHERE + sequence_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND sequence_name != LOWER(sequence_name) + + UNION ALL + + -- Views with uppercase names + SELECT + 'view' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM 
information_schema.views + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND table_name != LOWER(table_name) + + UNION ALL + + -- Functions with uppercase names + SELECT + 'function' AS object_type, + routine_schema AS schema_name, + routine_name AS object_name + FROM information_schema.routines + WHERE + routine_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND routine_type = 'FUNCTION' + AND routine_name != LOWER(routine_name) + + UNION ALL + + -- Triggers with uppercase names + SELECT + 'trigger' AS object_type, + trigger_schema AS schema_name, + trigger_name AS object_name + FROM information_schema.triggers + WHERE + trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND trigger_name != LOWER(trigger_name) + + UNION ALL + + -- Schemas with uppercase names + SELECT + 'schema' AS object_type, + schema_name AS schema_name, + schema_name AS object_name + FROM information_schema.schemata + WHERE + schema_name NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) + AND schema_name != LOWER(schema_name) +) AS uppercase_objects +ORDER BY + object_type, + schema_name, + object_name +$$ +WHERE code = 'B005'; + +-- ============================================================================= +-- B006 - Tables Never Selected (Total) +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT count(*)::BIGINT +FROM pg_catalog.pg_tables pt +WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT COUNT(*) AS unselected_tables +FROM pg_stat_user_tables AS psu +WHERE + (psu.idx_scan = 0 OR psu.idx_scan IS NULL) + AND (psu.seq_scan = 0 OR psu.seq_scan IS NULL) 
+ AND n_tup_ins > 0 + AND (n_tup_upd = 0 OR n_tup_upd IS NULL) + AND (n_tup_del = 0 OR n_tup_del IS NULL) + AND psu.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q3 = $$ +SELECT psu.schemaname::text, psu.relname::text +FROM pg_stat_user_tables AS psu +WHERE + (psu.idx_scan = 0 OR psu.idx_scan IS NULL) + AND (psu.seq_scan = 0 OR psu.seq_scan IS NULL) + AND n_tup_ins > 0 + AND (n_tup_upd = 0 OR n_tup_upd IS NULL) + AND (n_tup_del = 0 OR n_tup_del IS NULL) + AND psu.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$ +WHERE code = 'B006'; + + +-- ============================================================================= +-- B007 - Tables With FK Outside Schema (Total) +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(DISTINCT conrelid::regclass) AS tables_with_foreign_keys +FROM + pg_constraint c +JOIN + pg_class r ON r.oid = c.conrelid +JOIN + pg_namespace n ON n.oid = r.relnamespace +WHERE + c.contype = 'f' -- Filter for Foreign Key constraints + AND n.nspname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT + COUNT( + DISTINCT tc.table_schema || '.' 
|| tc.table_name + ) AS tables_with_fk_outside_schema +FROM information_schema.table_constraints AS tc +INNER JOIN information_schema.constraint_column_usage AS ccu + ON tc.constraint_name = ccu.constraint_name +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema != ccu.table_schema + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q3 = $$ +SELECT + tc.table_schema::TEXT,tc.table_name::TEXT, + 'has foreign key '||tc.constraint_name::TEXT||' referencing '|| + ccu.table_schema::TEXT||'.'||ccu.table_name::TEXT AS problematic_object +FROM information_schema.table_constraints AS tc +INNER JOIN information_schema.constraint_column_usage AS ccu + ON tc.constraint_name = ccu.constraint_name +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema != ccu.table_schema + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$ +WHERE code = 'B007'; + +-- ============================================================================= +-- B008 - Tables With FK mismatch +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(DISTINCT conrelid::regclass) AS tables_with_foreign_keys +FROM + pg_constraint c +JOIN + pg_class r ON r.oid = c.conrelid +JOIN + pg_namespace n ON n.oid = r.relnamespace +WHERE + c.contype = 'f' -- Filter for Foreign Key constraints + AND n.nspname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +SELECT + count(1)::BIGINT AS fk_type_mismatches +FROM information_schema.table_constraints AS tc +INNER JOIN information_schema.key_column_usage AS kcu + ON + tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema +INNER JOIN information_schema.constraint_column_usage AS ccu + ON tc.constraint_name = 
ccu.constraint_name +INNER JOIN information_schema.columns AS col1 + ON + kcu.table_schema = col1.table_schema + AND kcu.table_name = col1.table_name + AND kcu.column_name = col1.column_name +INNER JOIN information_schema.columns AS col2 + ON + ccu.table_schema = col2.table_schema + AND ccu.table_name = col2.table_name + AND ccu.column_name = col2.column_name +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND col1.data_type != col2.data_type +$$, + q3 = $$ +SELECT + tc.table_schema::text || '.' + || tc.table_name::text || ' constraint ' + || tc.constraint_name::text || ' column ' + || kcu.column_name::text || ' type is ' + || col1.data_type::text || ' but ' + || ccu.table_name::text || '.' + || ccu.column_name::text || ' type is ' + || col2.data_type::text AS problematic_object +FROM information_schema.table_constraints AS tc +INNER JOIN information_schema.key_column_usage AS kcu + ON + tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema +INNER JOIN information_schema.constraint_column_usage AS ccu + ON tc.constraint_name = ccu.constraint_name +INNER JOIN information_schema.columns AS col1 + ON + kcu.table_schema = col1.table_schema + AND kcu.table_name = col1.table_name + AND kcu.column_name = col1.column_name +INNER JOIN information_schema.columns AS col2 + ON + ccu.table_schema = col2.table_schema + AND ccu.table_name = col2.table_name + AND ccu.column_name = col2.column_name +WHERE + tc.constraint_type = 'FOREIGN KEY' + AND tc.table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND col1.data_type != col2.data_type +$$ +WHERE code = 'B008'; + +-- ============================================================================= +-- B009 - Tables With same trigger +-- ============================================================================= 
+UPDATE pglinter.rules +SET q1 = $$ +SELECT + COALESCE(COUNT(DISTINCT event_object_table), 0)::BIGINT as table_using_trigger +FROM + information_schema.triggers t +WHERE + t.trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter' +) +$$, + q2 = $$ +SELECT + COALESCE(SUM(shared_table_count), 0)::BIGINT AS table_using_same_trigger +FROM ( + SELECT + COUNT(DISTINCT t.event_object_table) AS shared_table_count + FROM ( + SELECT + t.event_object_table, + -- Extracts the function name from the action_statement (e.g., 'public.my_func()') + SUBSTRING(t.action_statement FROM 'EXECUTE FUNCTION ([^()]+)') AS trigger_function_name + FROM + information_schema.triggers t + WHERE + t.trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter' + ) + ) t + GROUP BY + t.trigger_function_name + HAVING + COUNT(DISTINCT t.event_object_table) > 1 +) shared_triggers +$$, + q3 = $$ +WITH SharedFunctions AS ( + -- 1. Identify all trigger functions that are used by more than one table + SELECT + SUBSTRING(t.action_statement FROM 'EXECUTE FUNCTION ([^()]+)') AS trigger_function_name + FROM + information_schema.triggers t + WHERE + t.trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + GROUP BY + 1 + HAVING + COUNT(DISTINCT t.event_object_table) > 1 +) +SELECT + t.event_object_table::TEXT AS table_name, + t.trigger_name::TEXT || ' uses the same trigger function ' || + t.trigger_schema::TEXT, + s.trigger_function_name::TEXT +FROM + information_schema.triggers t +JOIN + SharedFunctions s ON s.trigger_function_name = SUBSTRING(t.action_statement FROM 'EXECUTE FUNCTION ([^()]+)') +WHERE + t.trigger_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +ORDER BY + s.trigger_function_name, + t.trigger_schema, + t.event_object_table +$$ +WHERE code = 'B009'; + + +-- 
============================================================================= +-- B010 - Tables With Reserved Keywords +-- ============================================================================= +UPDATE pglinter.rules + SET q1 = $$ +SELECT count(*)::BIGINT AS total_tables +FROM pg_catalog.pg_tables +WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter','_timescaledb', 'timescaledb' + ) +$$, + q2 = $$ +WITH reserved_keywords AS ( + SELECT UNNEST(ARRAY[ + 'ALL', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', + 'ASYMMETRIC', 'AUTHORIZATION', 'BINARY', 'BOTH', 'CASE', 'CAST', + 'CHECK', 'COLLATE', 'COLLATION', 'COLUMN', 'CONCURRENTLY', + 'CONSTRAINT', 'CREATE', 'CROSS', 'CURRENT_CATALOG', 'CURRENT_DATE', + 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', + 'CURRENT_USER', 'DEFAULT', 'DEFERRABLE', 'DESC', 'DISTINCT', 'DO', + 'ELSE', 'END', 'EXCEPT', 'FALSE', 'FETCH', 'FOR', 'FOREIGN', 'FROM', + 'FULL', 'GRANT', 'GROUP', 'HAVING', 'IN', 'INITIALLY', 'INNER', + 'INTERSECT', 'INTO', 'IS', 'ISNULL', 'JOIN', 'LATERAL', 'LEADING', + 'LEFT', 'LIKE', 'LIMIT', 'LOCALTIME', 'LOCALTIMESTAMP', 'NATURAL', + 'NOT', 'NOTNULL', 'NULL', 'OFFSET', 'ON', 'ONLY', 'OR', 'ORDER', + 'OUTER', 'OVERLAPS', 'PLACING', 'PRIMARY', 'REFERENCES', 'RETURNING', + 'RIGHT', 'SELECT', 'SESSION_USER', 'SIMILAR', 'SOME', 'SYMMETRIC', + 'TABLE', 'TABLESAMPLE', 'THEN', 'TO', 'TRAILING', 'TRUE', 'UNION', + 'UNIQUE', 'USER', 'USING', 'VARIADIC', 'VERBOSE', 'WHEN', 'WHERE', + 'WINDOW', 'WITH' + ]) AS keyword +) +SELECT + COUNT(1) AS total_reserved_keyword_objects +FROM ( + -- Tables using reserved keywords + SELECT + 'table' AS object_type, + table_schema AS schema_name, + table_name AS object_name + FROM information_schema.tables + CROSS JOIN reserved_keywords + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(table_name) = keyword + + UNION ALL -- 
Use UNION ALL for counting to avoid redundant DISTINCT check + + -- Columns using reserved keywords + SELECT + 'column' AS object_type, + table_schema AS schema_name, + table_name || '.' || column_name AS object_name + FROM information_schema.columns + CROSS JOIN reserved_keywords + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(column_name) = keyword + + UNION ALL + + -- Indexes using reserved keywords + SELECT + 'index' AS object_type, + schemaname AS schema_name, + indexname AS object_name + FROM pg_indexes + CROSS JOIN reserved_keywords + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(indexname) = keyword +) reserved_objects +$$, + q3 = $$ +WITH reserved_keywords AS ( + SELECT UNNEST(ARRAY[ + 'ALL', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', + 'ASYMMETRIC', 'AUTHORIZATION', 'BINARY', 'BOTH', 'CASE', 'CAST', + 'CHECK', 'COLLATE', 'COLLATION', 'COLUMN', 'CONCURRENTLY', + 'CONSTRAINT', 'CREATE', 'CROSS', 'CURRENT_CATALOG', 'CURRENT_DATE', + 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', + 'CURRENT_USER', 'DEFAULT', 'DEFERRABLE', 'DESC', 'DISTINCT', 'DO', + 'ELSE', 'END', 'EXCEPT', 'FALSE', 'FETCH', 'FOR', 'FOREIGN', 'FROM', + 'FULL', 'GRANT', 'GROUP', 'HAVING', 'IN', 'INITIALLY', 'INNER', + 'INTERSECT', 'INTO', 'IS', 'ISNULL', 'JOIN', 'LATERAL', 'LEADING', + 'LEFT', 'LIKE', 'LIMIT', 'LOCALTIME', 'LOCALTIMESTAMP', 'NATURAL', + 'NOT', 'NOTNULL', 'NULL', 'OFFSET', 'ON', 'ONLY', 'OR', 'ORDER', + 'OUTER', 'OVERLAPS', 'PLACING', 'PRIMARY', 'REFERENCES', 'RETURNING', + 'RIGHT', 'SELECT', 'SESSION_USER', 'SIMILAR', 'SOME', 'SYMMETRIC', + 'TABLE', 'TABLESAMPLE', 'THEN', 'TO', 'TRAILING', 'TRUE', 'UNION', + 'UNIQUE', 'USER', 'USING', 'VARIADIC', 'VERBOSE', 'WHEN', 'WHERE', + 'WINDOW', 'WITH' + ]) AS keyword +) +SELECT + object_type || ' in ' || + schema_name, 
+ object_name || ' is a reserved keyword: ' || + keyword AS reserved_keyword_match +FROM ( + -- Tables using reserved keywords + SELECT + 'table' AS object_type, + table_schema AS schema_name, + table_name AS object_name, + keyword + FROM information_schema.tables + CROSS JOIN reserved_keywords + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(table_name) = keyword + + UNION ALL + + -- Columns using reserved keywords + SELECT + 'column' AS object_type, + table_schema AS schema_name, + table_name || '.' || column_name AS object_name, + keyword + FROM information_schema.columns + CROSS JOIN reserved_keywords + WHERE + table_schema NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(column_name) = keyword + + UNION ALL + + -- Indexes using reserved keywords + SELECT + 'index' AS object_type, + schemaname AS schema_name, + indexname AS object_name, + keyword + FROM pg_indexes + CROSS JOIN reserved_keywords + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND UPPER(indexname) = keyword +) AS reserved_objects +ORDER BY + object_type, + schema_name, + object_name +$$ +WHERE code = 'B010'; + +-- ============================================================================= +-- B011 - Several tables in schema have different owners +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*)::BIGINT AS total_schema_count +FROM + pg_namespace n +WHERE + n.nspname NOT IN ( 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') +$$, + q2 = $$ +WITH C1 AS ( +SELECT coalesce(count(DISTINCT tableowner)::BIGINT, 0) AS diff_owners +FROM pg_tables +WHERE + schemaname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', 
'_timescaledb', 'timescaledb') +GROUP BY schemaname) +SELECT COUNT(1) from C1 where diff_owners > 1 +$$, + q3 = $$ +WITH SchemaOwnerTable AS ( + -- Step 1: Find all distinct combinations of (schemaname, tableowner) + SELECT DISTINCT + schemaname::TEXT AS schemaname, + tableowner::TEXT AS tableowner + FROM + pg_tables + WHERE + schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +), +OwnerCounts AS ( + -- Step 2: Count the number of distinct owners for each schema + SELECT + schemaname, + COUNT(tableowner) AS distinct_owner_count + FROM + SchemaOwnerTable + GROUP BY + schemaname + HAVING + -- Only keep schemas that have more than one distinct owner + COUNT(tableowner) > 1 +) +SELECT + t.schemaname::TEXT, + t.tablename || ' owner is ' || t.tableowner::TEXT AS table_and_owner +FROM + pg_tables t +JOIN + OwnerCounts oc ON t.schemaname = oc.schemaname +WHERE + t.schemaname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) +ORDER BY + 1, 2 +$$ +WHERE code = 'B011'; + +-- ============================================================================= +-- B012 - Composite primary keys with more than 4 columns +-- ============================================================================= + +UPDATE pglinter.rules +SET + q1 = $$ +SELECT COUNT(*)::BIGINT AS total_composite_pk_tables +FROM ( + SELECT tc.table_schema, tc.table_name, COUNT(kcu.column_name) AS pk_col_count + FROM information_schema.table_constraints tc + JOIN information_schema.key_column_usage kcu + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + AND tc.table_name = kcu.table_name + WHERE tc.constraint_type = 'PRIMARY KEY' + AND tc.table_schema NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + GROUP BY tc.table_schema, tc.table_name, tc.constraint_name +) sub +$$, + q2 = $$ +SELECT COUNT(*)::BIGINT AS 
total_composite_pk_tables +FROM ( + SELECT tc.table_schema, tc.table_name, COUNT(kcu.column_name) AS pk_col_count + FROM information_schema.table_constraints tc + JOIN information_schema.key_column_usage kcu + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + AND tc.table_name = kcu.table_name + WHERE tc.constraint_type = 'PRIMARY KEY' + AND tc.table_schema NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + GROUP BY tc.table_schema, tc.table_name, tc.constraint_name + HAVING COUNT(kcu.column_name) > 4 +) sub +$$, + q3 = $$ +SELECT + sub.table_schema || '.' || sub.table_name ||'('||string_agg(sub.column_name, ', ')||')' AS pk_columns +FROM ( + SELECT + tc.table_schema, + tc.table_name, + kcu.column_name + FROM information_schema.table_constraints tc + JOIN information_schema.key_column_usage kcu + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + AND tc.table_name = kcu.table_name + WHERE tc.constraint_type = 'PRIMARY KEY' + AND tc.table_schema NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') +) sub +GROUP BY sub.table_schema, sub.table_name +HAVING COUNT(sub.column_name) > 4 +$$ +WHERE code = 'B012'; + +-- ============================================================================= +-- S001 - Schema Permission Analysis +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*) AS total_schema_count +FROM + pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' +$$, + q2 = $$ +SELECT count(DISTINCT n.nspname::text)::BIGINT AS nb_schema +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' + 
AND NOT EXISTS ( + SELECT 1 + FROM pg_default_acl da + WHERE + da.defaclnamespace = n.oid + AND da.defaclrole != n.nspowner + ) +ORDER BY 1 +$$, + q3 = $$ +SELECT DISTINCT n.nspname::text AS schema_name +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' + AND NOT EXISTS ( + SELECT 1 + FROM pg_default_acl da + WHERE + da.defaclnamespace = n.oid + AND da.defaclrole != n.nspowner + ) +ORDER BY 1 +$$ +WHERE code = 'S001'; + + +-- ============================================================================= +-- S002 - Environment-Named Schemas +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*)::BIGINT AS total_schema_count +FROM + pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' +$$, + q2 = $$ +SELECT count(n.nspname::text)::BIGINT AS nb_schema_name +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' + AND ( + n.nspname ILIKE 'staging_%' OR n.nspname ILIKE '%_staging' + OR n.nspname ILIKE 'stg_%' OR n.nspname ILIKE '%_stg' + OR n.nspname ILIKE 'preprod_%' OR n.nspname ILIKE '%_preprod' + OR n.nspname ILIKE 'prod_%' OR n.nspname ILIKE '%_prod' + OR n.nspname ILIKE 'production_%' OR n.nspname ILIKE '%_production' + OR n.nspname ILIKE 'dev_%' OR n.nspname ILIKE '%_dev' + OR n.nspname ILIKE 'development_%' OR n.nspname ILIKE '%_development' + OR n.nspname ILIKE 'sandbox_%' OR n.nspname ILIKE '%_sandbox' + OR n.nspname ILIKE 'sbox_%' OR n.nspname ILIKE '%_sbox' + ) +$$, + q3 = $$ +SELECT n.nspname::text AS nb_schema_name +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', 
'_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' + AND ( + n.nspname ILIKE 'staging_%' OR n.nspname ILIKE '%_staging' + OR n.nspname ILIKE 'stg_%' OR n.nspname ILIKE '%_stg' + OR n.nspname ILIKE 'preprod_%' OR n.nspname ILIKE '%_preprod' + OR n.nspname ILIKE 'prod_%' OR n.nspname ILIKE '%_prod' + OR n.nspname ILIKE 'production_%' OR n.nspname ILIKE '%_production' + OR n.nspname ILIKE 'dev_%' OR n.nspname ILIKE '%_dev' + OR n.nspname ILIKE 'development_%' OR n.nspname ILIKE '%_development' + OR n.nspname ILIKE 'sandbox_%' OR n.nspname ILIKE '%_sandbox' + OR n.nspname ILIKE 'sbox_%' OR n.nspname ILIKE '%_sbox' + ) +ORDER BY 1 +$$ +WHERE code = 'S002'; + +-- ============================================================================= +-- S003 - Schema Public Access (Problems) +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*)::BIGINT AS total_schema_count +FROM + pg_namespace n +WHERE + n.nspname NOT IN ( 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' +$$, + q2 = $$ +SELECT COUNT(*) AS total_schemas +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND HAS_SCHEMA_PRIVILEGE('public', n.nspname, 'CREATE') +$$, + q3 = $$ +SELECT n.nspname::text AS schemas +FROM pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND HAS_SCHEMA_PRIVILEGE('public', n.nspname, 'CREATE') +ORDER BY 1 +$$ +WHERE code = 'S003'; + +-- ============================================================================= +-- S004 - Schema Owner is Internal Role +-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*)::BIGINT AS total_schema_count +FROM + pg_namespace n +WHERE + 
n.nspname NOT IN ( 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') +    AND n.nspname NOT LIKE 'pg_%' +$$, +    q2 = $$ +SELECT COUNT(*)::BIGINT AS total_schemas +FROM pg_namespace n JOIN pg_roles r ON n.nspowner = r.oid +WHERE +    n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') +    AND (r.rolsuper IS TRUE OR r.rolname LIKE 'pg_%' OR r.rolname = 'postgres') +$$, +    q3 = $$ +SELECT +    r.rolname::TEXT || ' is the owner of the schema ' || n.nspname::TEXT AS owner_info +FROM +    pg_namespace n +JOIN +    pg_roles r ON n.nspowner = r.oid +WHERE +    n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') +    AND ( +        r.rolsuper IS TRUE -- Owned by a Superuser (e.g., 'postgres') +        OR r.rolname LIKE 'pg_%' -- Owned by a reserved PostgreSQL system role +        OR r.rolname = 'postgres' -- Explicitly include the default administrative account +    ) +ORDER BY +    1 +$$ +WHERE code = 'S004'; + +-- ============================================================================= +-- S005 - Schema and table owners differ.
+-- ============================================================================= +UPDATE pglinter.rules +SET + q1 = $$ +SELECT + COUNT(*)::BIGINT AS total_schema_count +FROM + pg_namespace n +WHERE + n.nspname NOT IN ('pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb') + AND n.nspname NOT LIKE 'pg_%' +$$, + q2 = $$ +SELECT + COUNT(DISTINCT n.nspname) AS schemas_with_mixed_ownership +FROM + pg_namespace n +JOIN + pg_class c ON c.relnamespace = n.oid -- Link schema to its relations (tables) +WHERE + n.nspname NOT IN ( -- Exclude system/technical schemas + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND n.nspname NOT LIKE 'pg_temp%' -- Exclude temp schemas + AND c.relkind = 'r' -- Only count regular tables ('r') + AND n.nspowner <> c.relowner -- Schema owner does NOT equal Table owner +$$, + q3 = $$ +SELECT + 'Owner of schema ' || n.nspname::TEXT || ' is ' || r_schema.rolname::TEXT ||' but owner of table '||n.nspname::TEXT ||'.'|| c.relname::TEXT || ' is ' || r_table.rolname::TEXT AS ownership_info +FROM + pg_namespace n +JOIN + pg_class c ON c.relnamespace = n.oid -- Link schema to its relations (tables) +JOIN + pg_roles r_schema ON n.nspowner = r_schema.oid -- Get schema owner name +JOIN + pg_roles r_table ON c.relowner = r_table.oid -- Get table owner name +WHERE + n.nspname NOT IN ( + 'pg_toast', 'pg_catalog', 'information_schema', 'pglinter', '_timescaledb', 'timescaledb' + ) + AND n.nspname NOT LIKE 'pg_temp%' + AND c.relkind = 'r' -- Only count regular tables + AND n.nspowner <> c.relowner -- The core condition: Owners are different +ORDER BY 1 +$$ +WHERE code = 'S005'; + + +-- ============================================================================= +-- C001 - Memory Configuration Analysis +-- ============================================================================= +UPDATE pglinter.rules +SET q1 = $$ +SELECT + current_setting('max_connections')::int AS 
max_connections, + current_setting('work_mem') AS work_mem_setting +$$ +WHERE code = 'C001'; + +-- ============================================================================= +-- C002 - Authentication Security (Total) +-- ============================================================================= +UPDATE pglinter.rules +SET q1 = $$ +SELECT count(*)::BIGINT FROM pg_catalog.pg_hba_file_rules +$$ +WHERE code = 'C002'; + +-- ============================================================================= +-- C002 - Authentication Security (Problems) +-- ============================================================================= +UPDATE pglinter.rules +SET q2 = $$ +SELECT count(*)::BIGINT +FROM pg_catalog.pg_hba_file_rules +WHERE auth_method IN ('trust', 'password') +$$ +WHERE code = 'C002'; + +-- ============================================================================= +-- C003 - MD5 encrypted Passwords (Problems) +-- ============================================================================= +UPDATE pglinter.rules +SET q1 = $$ +SELECT 'password_encryption is ' || setting FROM +pg_catalog.pg_settings +WHERE name='password_encryption' AND setting='md5' +$$ +WHERE code = 'C003'; diff --git a/crates/pgls_splinter/Cargo.toml b/crates/pgls_splinter/Cargo.toml index e42d9a51c..5298cf0f4 100644 --- a/crates/pgls_splinter/Cargo.toml +++ b/crates/pgls_splinter/Cargo.toml @@ -23,5 +23,9 @@ insta.workspace = true pgls_console.workspace = true pgls_test_utils.workspace = true +[build-dependencies] +serde_json = "1" +ureq = { version = "2.9", features = ["json"] } + [lib] doctest = false diff --git a/crates/pgls_splinter/build.rs b/crates/pgls_splinter/build.rs new file mode 100644 index 000000000..ba3df6f11 --- /dev/null +++ b/crates/pgls_splinter/build.rs @@ -0,0 +1,104 @@ +use std::fs; +use std::io::Write; +use std::path::Path; + +const EXPECTED_COMMIT: &str = "27ea2ece65464213e466cd969cc61b6940d16219"; +const REPO: &str = "supabase/splinter"; + +fn main() { + let 
manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + let vendor_dir = Path::new(&manifest_dir).join("vendor"); + let sha_file = vendor_dir.join("COMMIT_SHA.txt"); + + // Check if vendor files exist and SHA matches + let needs_download = if sha_file.exists() { + let current_sha = fs::read_to_string(&sha_file).unwrap_or_default(); + current_sha.trim() != EXPECTED_COMMIT + } else { + true + }; + + if needs_download { + println!("cargo:warning=Downloading splinter vendor files..."); + fs::create_dir_all(&vendor_dir).expect("Failed to create vendor directory"); + + // Discover categories by listing lints/ directory + let categories = list_directories(REPO, EXPECTED_COMMIT, "lints"); + + for category in &categories { + let category_dir = vendor_dir.join(category); + fs::create_dir_all(&category_dir) + .unwrap_or_else(|_| panic!("Failed to create vendor/{category}")); + + download_sql_files( + REPO, + EXPECTED_COMMIT, + &format!("lints/{category}"), + &category_dir, + ); + } + + // Write commit SHA + fs::write(&sha_file, EXPECTED_COMMIT).expect("Failed to write COMMIT_SHA.txt"); + + println!("cargo:warning=Downloaded splinter vendor files successfully"); + } + + println!("cargo:rerun-if-changed=vendor/COMMIT_SHA.txt"); +} + +/// List subdirectories in a GitHub path +fn list_directories(repo: &str, commit: &str, path: &str) -> Vec { + let api_url = format!("https://api.github.com/repos/{repo}/contents/{path}?ref={commit}"); + + let response = ureq::get(&api_url) + .set("User-Agent", "pgls-build") + .call() + .unwrap_or_else(|e| panic!("Failed to list {path}: {e}")); + + let json: serde_json::Value = response + .into_json() + .expect("Failed to parse GitHub API response"); + + json.as_array() + .expect("Expected array from GitHub API") + .iter() + .filter(|item| item["type"].as_str() == Some("dir")) + .filter_map(|item| item["name"].as_str().map(String::from)) + .collect() +} + +/// Download all .sql files from a GitHub directory +fn download_sql_files(repo: &str, 
commit: &str, path: &str, dest_dir: &Path) { + let api_url = format!("https://api.github.com/repos/{repo}/contents/{path}?ref={commit}"); + + let response = ureq::get(&api_url) + .set("User-Agent", "pgls-build") + .call() + .unwrap_or_else(|e| panic!("Failed to list {path}: {e}")); + + let json: serde_json::Value = response + .into_json() + .expect("Failed to parse GitHub API response"); + + for item in json.as_array().expect("Expected array") { + let name = item["name"].as_str().expect("Missing name"); + if !name.ends_with(".sql") { + continue; + } + + let download_url = item["download_url"].as_str().expect("Missing download_url"); + + let content = ureq::get(download_url) + .call() + .unwrap_or_else(|_| panic!("Failed to download {name}")) + .into_string() + .unwrap_or_else(|_| panic!("Failed to read {name}")); + + let dest = dest_dir.join(name); + let mut file = + fs::File::create(&dest).unwrap_or_else(|_| panic!("Failed to create {name}")); + file.write_all(content.as_bytes()) + .unwrap_or_else(|_| panic!("Failed to write {name}")); + } +} diff --git a/crates/pgls_splinter/src/rules/performance/auth_rls_initplan.rs b/crates/pgls_splinter/src/rules/performance/auth_rls_initplan.rs index bd5c4f34c..47cf62e78 100644 --- a/crates/pgls_splinter/src/rules/performance/auth_rls_initplan.rs +++ b/crates/pgls_splinter/src/rules/performance/auth_rls_initplan.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Auth RLS Initialization Plan\n///\n/// Detects if calls to \\`current_setting()\\` and \\`auth.()\\` in RLS policies are being unnecessarily re-evaluated for each row\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). 
\n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// with policies as (\n/// select\n/// nsp.nspname as schema_name,\n/// pb.tablename as table_name,\n/// pc.relrowsecurity as is_rls_active,\n/// polname as policy_name,\n/// polpermissive as is_permissive, -- if not, then restrictive\n/// (select array_agg(r::regrole) from unnest(polroles) as x(r)) as roles,\n/// case polcmd\n/// when 'r' then 'SELECT'\n/// when 'a' then 'INSERT'\n/// when 'w' then 'UPDATE'\n/// when 'd' then 'DELETE'\n/// when '*' then 'ALL'\n/// end as command,\n/// qual,\n/// with_check\n/// from\n/// pg_catalog.pg_policy pa\n/// join pg_catalog.pg_class pc\n/// on pa.polrelid = pc.oid\n/// join pg_catalog.pg_namespace nsp\n/// on pc.relnamespace = nsp.oid\n/// join pg_catalog.pg_policies pb\n/// on pc.relname = pb.tablename\n/// and nsp.nspname = pb.schemaname\n/// and pa.polname = pb.policyname\n/// )\n/// select\n/// 'auth_rls_initplan' as \"name!\",\n/// 'Auth RLS Initialization Plan' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects if calls to \\`current_setting()\\` and \\`auth.()\\` in RLS policies are being unnecessarily re-evaluated for each row' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that re-evaluates current_setting() or auth.() for each row. This produces suboptimal query performance at scale. Resolve the issue by replacing \\`auth.()\\` with \\`(select auth.())\\`. 
See [docs](https://supabase.com/docs/guides/database/postgres/row-level-security#call-functions-with-select) for more info.',\n/// schema_name,\n/// table_name,\n/// policy_name\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', schema_name,\n/// 'name', table_name,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format('auth_rls_init_plan_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\"\n/// from\n/// policies\n/// where\n/// is_rls_active\n/// -- NOTE: does not include realtime in support of monitoring policies on realtime.messages\n/// and schema_name not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and (\n/// -- Example: auth.uid()\n/// (\n/// qual like '%auth.uid()%'\n/// and lower(qual) not like '%select auth.uid()%'\n/// )\n/// or (\n/// qual like '%auth.jwt()%'\n/// and lower(qual) not like '%select auth.jwt()%'\n/// )\n/// or (\n/// qual like '%auth.role()%'\n/// and lower(qual) not like '%select auth.role()%'\n/// )\n/// or (\n/// qual like '%auth.email()%'\n/// and lower(qual) not like '%select auth.email()%'\n/// )\n/// or (\n/// qual like '%current\\_setting(%)%'\n/// and lower(qual) not like '%select current\\_setting(%)%'\n/// )\n/// or (\n/// with_check like '%auth.uid()%'\n/// and lower(with_check) not like '%select auth.uid()%'\n/// )\n/// or (\n/// with_check like '%auth.jwt()%'\n/// and lower(with_check) not like '%select auth.jwt()%'\n/// )\n/// or (\n/// with_check like '%auth.role()%'\n/// and lower(with_check) not like '%select auth.role()%'\n/// )\n/// 
or (\n/// with_check like '%auth.email()%'\n/// and lower(with_check) not like '%select auth.email()%'\n/// )\n/// or (\n/// with_check like '%current\\_setting(%)%'\n/// and lower(with_check) not like '%select current\\_setting(%)%'\n/// )\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"authRlsInitplan\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub AuthRlsInitplan { version : "1.0.0" , name : "authRlsInitplan" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Auth RLS Initialization Plan\n\nDetects if calls to \\`current_setting()\\` and \\`auth.()\\` in RLS policies are being unnecessarily re-evaluated for each row\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nwith policies as (\n select\n nsp.nspname as schema_name,\n pb.tablename as table_name,\n pc.relrowsecurity as is_rls_active,\n polname as policy_name,\n polpermissive as is_permissive, -- if not, then restrictive\n (select array_agg(r::regrole) from unnest(polroles) as x(r)) as roles,\n case polcmd\n when 'r' then 'SELECT'\n when 'a' then 'INSERT'\n when 'w' then 'UPDATE'\n when 'd' then 'DELETE'\n when '*' then 'ALL'\n end as command,\n qual,\n with_check\n from\n pg_catalog.pg_policy pa\n join pg_catalog.pg_class pc\n on pa.polrelid = pc.oid\n join pg_catalog.pg_namespace nsp\n on pc.relnamespace = nsp.oid\n join pg_catalog.pg_policies pb\n on pc.relname = pb.tablename\n and nsp.nspname = pb.schemaname\n and pa.polname = pb.policyname\n)\nselect\n 'auth_rls_initplan' as \"name!\",\n 'Auth RLS Initialization Plan' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as 
\"categories!\",\n 'Detects if calls to \\`current_setting()\\` and \\`auth.()\\` in RLS policies are being unnecessarily re-evaluated for each row' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that re-evaluates current_setting() or auth.() for each row. This produces suboptimal query performance at scale. Resolve the issue by replacing \\`auth.()\\` with \\`(select auth.())\\`. See [docs](https://supabase.com/docs/guides/database/postgres/row-level-security#call-functions-with-select) for more info.',\n schema_name,\n table_name,\n policy_name\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan' as \"remediation!\",\n jsonb_build_object(\n 'schema', schema_name,\n 'name', table_name,\n 'type', 'table'\n ) as \"metadata!\",\n format('auth_rls_init_plan_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\"\nfrom\n policies\nwhere\n is_rls_active\n -- NOTE: does not include realtime in support of monitoring policies on realtime.messages\n and schema_name not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and (\n -- Example: auth.uid()\n (\n qual like '%auth.uid()%'\n and lower(qual) not like '%select auth.uid()%'\n )\n or (\n qual like '%auth.jwt()%'\n and lower(qual) not like '%select auth.jwt()%'\n )\n or (\n qual like '%auth.role()%'\n and lower(qual) not like '%select auth.role()%'\n )\n or (\n qual like '%auth.email()%'\n and lower(qual) not like '%select auth.email()%'\n )\n or (\n qual like '%current\\_setting(%)%'\n and lower(qual) not like '%select current\\_setting(%)%'\n )\n or (\n with_check like 
'%auth.uid()%'\n and lower(with_check) not like '%select auth.uid()%'\n )\n or (\n with_check like '%auth.jwt()%'\n and lower(with_check) not like '%select auth.jwt()%'\n )\n or (\n with_check like '%auth.role()%'\n and lower(with_check) not like '%select auth.role()%'\n )\n or (\n with_check like '%auth.email()%'\n and lower(with_check) not like '%select auth.email()%'\n )\n or (\n with_check like '%current\\_setting(%)%'\n and lower(with_check) not like '%select current\\_setting(%)%'\n )\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"authRlsInitplan\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub AuthRlsInitplan { version : "1.0.0" , name : "authRlsInitplan" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for AuthRlsInitplan { const SQL_FILE_PATH: &'static str = "performance/auth_rls_initplan.sql"; const DESCRIPTION: &'static str = "Detects if calls to \\`current_setting()\\` and \\`auth.()\\` in RLS policies are being unnecessarily re-evaluated for each row"; diff --git a/crates/pgls_splinter/src/rules/performance/duplicate_index.rs b/crates/pgls_splinter/src/rules/performance/duplicate_index.rs index 841b6a5cb..c0db964ce 100644 --- a/crates/pgls_splinter/src/rules/performance/duplicate_index.rs +++ b/crates/pgls_splinter/src/rules/performance/duplicate_index.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! 
{ # [doc = "/// # Duplicate Index\n///\n/// Detects cases where two ore more identical indexes exist.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'duplicate_index' as \"name!\",\n/// 'Duplicate Index' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects cases where two ore more identical indexes exist.' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has identical indexes %s. Drop all except one of them',\n/// n.nspname,\n/// c.relname,\n/// array_agg(pi.indexname order by pi.indexname)\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', case\n/// when c.relkind = 'r' then 'table'\n/// when c.relkind = 'm' then 'materialized view'\n/// else 'ERROR'\n/// end,\n/// 'indexes', array_agg(pi.indexname order by pi.indexname)\n/// ) as \"metadata!\",\n/// format(\n/// 'duplicate_index_%s_%s_%s',\n/// n.nspname,\n/// c.relname,\n/// array_agg(pi.indexname order by pi.indexname)\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_indexes pi\n/// join pg_catalog.pg_namespace n\n/// on n.nspname = pi.schemaname\n/// join pg_catalog.pg_class c\n/// on pi.tablename = c.relname\n/// and n.oid = c.relnamespace\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind in ('r', 'm') -- tables and materialized views\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and 
dep.objid is null -- exclude tables owned by extensions\n/// group by\n/// n.nspname,\n/// c.relkind,\n/// c.relname,\n/// replace(pi.indexdef, pi.indexname, '')\n/// having\n/// count(*) > 1)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"duplicateIndex\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub DuplicateIndex { version : "1.0.0" , name : "duplicateIndex" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Duplicate Index\n\nDetects cases where two ore more identical indexes exist.\n\n## SQL Query\n\n```sql\n(\nselect\n 'duplicate_index' as \"name!\",\n 'Duplicate Index' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Detects cases where two ore more identical indexes exist.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has identical indexes %s. 
Drop all except one of them',\n n.nspname,\n c.relname,\n array_agg(pi.indexname order by pi.indexname)\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', case\n when c.relkind = 'r' then 'table'\n when c.relkind = 'm' then 'materialized view'\n else 'ERROR'\n end,\n 'indexes', array_agg(pi.indexname order by pi.indexname)\n ) as \"metadata!\",\n format(\n 'duplicate_index_%s_%s_%s',\n n.nspname,\n c.relname,\n array_agg(pi.indexname order by pi.indexname)\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_indexes pi\n join pg_catalog.pg_namespace n\n on n.nspname = pi.schemaname\n join pg_catalog.pg_class c\n on pi.tablename = c.relname\n and n.oid = c.relnamespace\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind in ('r', 'm') -- tables and materialized views\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null -- exclude tables owned by extensions\ngroup by\n n.nspname,\n c.relkind,\n c.relname,\n replace(pi.indexdef, pi.indexname, '')\nhaving\n count(*) > 1)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"duplicateIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub DuplicateIndex { version : "1.0.0" , name : "duplicateIndex" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for DuplicateIndex { const SQL_FILE_PATH: 
&'static str = "performance/duplicate_index.sql"; const DESCRIPTION: &'static str = "Detects cases where two ore more identical indexes exist."; diff --git a/crates/pgls_splinter/src/rules/performance/multiple_permissive_policies.rs b/crates/pgls_splinter/src/rules/performance/multiple_permissive_policies.rs index 227551a04..995ef8810 100644 --- a/crates/pgls_splinter/src/rules/performance/multiple_permissive_policies.rs +++ b/crates/pgls_splinter/src/rules/performance/multiple_permissive_policies.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Multiple Permissive Policies\n///\n/// Detects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'multiple_permissive_policies' as \"name!\",\n/// 'Multiple Permissive Policies' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has multiple permissive policies for role \\`%s\\` for action \\`%s\\`. 
Policies include \\`%s\\`',\n/// n.nspname,\n/// c.relname,\n/// r.rolname,\n/// act.cmd,\n/// array_agg(p.polname order by p.polname)\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0006_multiple_permissive_policies' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'multiple_permissive_policies_%s_%s_%s_%s',\n/// n.nspname,\n/// c.relname,\n/// r.rolname,\n/// act.cmd\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_policy p\n/// join pg_catalog.pg_class c\n/// on p.polrelid = c.oid\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// join pg_catalog.pg_roles r\n/// on p.polroles @> array[r.oid]\n/// or p.polroles = array[0::oid]\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e',\n/// lateral (\n/// select x.cmd\n/// from unnest((\n/// select\n/// case p.polcmd\n/// when 'r' then array['SELECT']\n/// when 'a' then array['INSERT']\n/// when 'w' then array['UPDATE']\n/// when 'd' then array['DELETE']\n/// when '*' then array['SELECT', 'INSERT', 'UPDATE', 'DELETE']\n/// else array['ERROR']\n/// end as actions\n/// )) x(cmd)\n/// ) act(cmd)\n/// where\n/// c.relkind = 'r' -- regular tables\n/// and p.polpermissive -- policy is permissive\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and r.rolname not like 'pg_%'\n/// and r.rolname not like 'supabase%admin'\n/// and not r.rolbypassrls\n/// and dep.objid is null -- exclude tables owned by extensions\n/// group by\n/// 
n.nspname,\n/// c.relname,\n/// r.rolname,\n/// act.cmd\n/// having\n/// count(1) > 1)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"multiplePermissivePolicies\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub MultiplePermissivePolicies { version : "1.0.0" , name : "multiplePermissivePolicies" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Multiple Permissive Policies\n\nDetects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.\n\n## SQL Query\n\n```sql\n(\nselect\n 'multiple_permissive_policies' as \"name!\",\n 'Multiple Permissive Policies' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Detects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has multiple permissive policies for role \\`%s\\` for action \\`%s\\`. 
Policies include \\`%s\\`',\n n.nspname,\n c.relname,\n r.rolname,\n act.cmd,\n array_agg(p.polname order by p.polname)\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0006_multiple_permissive_policies' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'multiple_permissive_policies_%s_%s_%s_%s',\n n.nspname,\n c.relname,\n r.rolname,\n act.cmd\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_policy p\n join pg_catalog.pg_class c\n on p.polrelid = c.oid\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\n join pg_catalog.pg_roles r\n on p.polroles @> array[r.oid]\n or p.polroles = array[0::oid]\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e',\n lateral (\n select x.cmd\n from unnest((\n select\n case p.polcmd\n when 'r' then array['SELECT']\n when 'a' then array['INSERT']\n when 'w' then array['UPDATE']\n when 'd' then array['DELETE']\n when '*' then array['SELECT', 'INSERT', 'UPDATE', 'DELETE']\n else array['ERROR']\n end as actions\n )) x(cmd)\n ) act(cmd)\nwhere\n c.relkind = 'r' -- regular tables\n and p.polpermissive -- policy is permissive\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and r.rolname not like 'pg_%'\n and r.rolname not like 'supabase%admin'\n and not r.rolbypassrls\n and dep.objid is null -- exclude tables owned by extensions\ngroup by\n n.nspname,\n c.relname,\n r.rolname,\n act.cmd\nhaving\n count(1) > 1)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": 
{\n \"rules\": {\n \"performance\": {\n \"multiplePermissivePolicies\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub MultiplePermissivePolicies { version : "1.0.0" , name : "multiplePermissivePolicies" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for MultiplePermissivePolicies { const SQL_FILE_PATH: &'static str = "performance/multiple_permissive_policies.sql"; const DESCRIPTION: &'static str = "Detects if multiple permissive row level security policies are present on a table for the same \\`role\\` and \\`action\\` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query."; diff --git a/crates/pgls_splinter/src/rules/performance/no_primary_key.rs b/crates/pgls_splinter/src/rules/performance/no_primary_key.rs index d65f9fc80..e31d50562 100644 --- a/crates/pgls_splinter/src/rules/performance/no_primary_key.rs +++ b/crates/pgls_splinter/src/rules/performance/no_primary_key.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # No Primary Key\n///\n/// Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'no_primary_key' as \"name!\",\n/// 'No Primary Key' as \"title!\",\n/// 'INFO' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale.' 
as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` does not have a primary key',\n/// pgns.nspname,\n/// pgc.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0004_no_primary_key' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', pgns.nspname,\n/// 'name', pgc.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'no_primary_key_%s_%s',\n/// pgns.nspname,\n/// pgc.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class pgc\n/// join pg_catalog.pg_namespace pgns\n/// on pgns.oid = pgc.relnamespace\n/// left join pg_catalog.pg_index pgi\n/// on pgi.indrelid = pgc.oid\n/// left join pg_catalog.pg_depend dep\n/// on pgc.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// pgc.relkind = 'r' -- regular tables\n/// and pgns.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null -- exclude tables owned by extensions\n/// group by\n/// pgc.oid,\n/// pgns.nspname,\n/// pgc.relname\n/// having\n/// max(coalesce(pgi.indisprimary, false)::int) = 0)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"noPrimaryKey\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub NoPrimaryKey { version : "1.0.0" , name : "noPrimaryKey" , severity : pgls_diagnostics :: Severity :: Information , } } +::pgls_analyse::declare_rule! { # [doc = "# No Primary Key\n\nDetects if a table does not have a primary key. 
Tables without a primary key can be inefficient to interact with at scale.\n\n## SQL Query\n\n```sql\n(\nselect\n 'no_primary_key' as \"name!\",\n 'No Primary Key' as \"title!\",\n 'INFO' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` does not have a primary key',\n pgns.nspname,\n pgc.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0004_no_primary_key' as \"remediation!\",\n jsonb_build_object(\n 'schema', pgns.nspname,\n 'name', pgc.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'no_primary_key_%s_%s',\n pgns.nspname,\n pgc.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class pgc\n join pg_catalog.pg_namespace pgns\n on pgns.oid = pgc.relnamespace\n left join pg_catalog.pg_index pgi\n on pgi.indrelid = pgc.oid\n left join pg_catalog.pg_depend dep\n on pgc.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n pgc.relkind = 'r' -- regular tables\n and pgns.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null -- exclude tables owned by extensions\ngroup by\n pgc.oid,\n pgns.nspname,\n pgc.relname\nhaving\n max(coalesce(pgi.indisprimary, false)::int) = 0)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"noPrimaryKey\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub NoPrimaryKey { version : "1.0.0" , name : 
"noPrimaryKey" , severity : pgls_diagnostics :: Severity :: Information , recommended : true , } } impl SplinterRule for NoPrimaryKey { const SQL_FILE_PATH: &'static str = "performance/no_primary_key.sql"; const DESCRIPTION: &'static str = "Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale."; diff --git a/crates/pgls_splinter/src/rules/performance/table_bloat.rs b/crates/pgls_splinter/src/rules/performance/table_bloat.rs index 58e34d3b2..d286b1b9c 100644 --- a/crates/pgls_splinter/src/rules/performance/table_bloat.rs +++ b/crates/pgls_splinter/src/rules/performance/table_bloat.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Table Bloat\n///\n/// Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// with constants as (\n/// select current_setting('block_size')::numeric as bs, 23 as hdr, 4 as ma\n/// ),\n/// \n/// bloat_info as (\n/// select\n/// ma,\n/// bs,\n/// schemaname,\n/// tablename,\n/// (datawidth + (hdr + ma - (case when hdr % ma = 0 then ma else hdr % ma end)))::numeric as datahdr,\n/// (maxfracsum * (nullhdr + ma - (case when nullhdr % ma = 0 then ma else nullhdr % ma end))) as nullhdr2\n/// from (\n/// select\n/// schemaname,\n/// tablename,\n/// hdr,\n/// ma,\n/// bs,\n/// sum((1 - null_frac) * avg_width) as datawidth,\n/// max(null_frac) as maxfracsum,\n/// hdr + (\n/// select 1 + count(*) / 8\n/// from pg_stats s2\n/// where\n/// null_frac <> 0\n/// and s2.schemaname = s.schemaname\n/// and s2.tablename = s.tablename\n/// ) as nullhdr\n/// from pg_stats s, constants\n/// group by 1, 2, 3, 4, 5\n/// ) as foo\n/// ),\n/// \n/// table_bloat as (\n/// select\n/// schemaname,\n/// tablename,\n/// cc.relpages,\n/// bs,\n/// ceil((cc.reltuples * ((datahdr + ma 
-\n/// (case when datahdr % ma = 0 then ma else datahdr % ma end)) + nullhdr2 + 4)) / (bs - 20::float)) as otta\n/// from\n/// bloat_info\n/// join pg_class cc\n/// on cc.relname = bloat_info.tablename\n/// join pg_namespace nn\n/// on cc.relnamespace = nn.oid\n/// and nn.nspname = bloat_info.schemaname\n/// and nn.nspname <> 'information_schema'\n/// where\n/// cc.relkind = 'r'\n/// and cc.relam = (select oid from pg_am where amname = 'heap')\n/// ),\n/// \n/// bloat_data as (\n/// select\n/// 'table' as type,\n/// schemaname,\n/// tablename as object_name,\n/// round(case when otta = 0 then 0.0 else table_bloat.relpages / otta::numeric end, 1) as bloat,\n/// case when relpages < otta then 0 else (bs * (table_bloat.relpages - otta)::bigint)::bigint end as raw_waste\n/// from\n/// table_bloat\n/// )\n/// \n/// select\n/// 'table_bloat' as \"name!\",\n/// 'Table Bloat' as \"title!\",\n/// 'INFO' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.' as \"description!\",\n/// format(\n/// 'Table `%s`.`%s` has excessive bloat',\n/// bloat_data.schemaname,\n/// bloat_data.object_name\n/// ) as \"detail!\",\n/// 'Consider running vacuum full (WARNING: incurs downtime) and tweaking autovacuum settings to reduce bloat.' 
as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', bloat_data.schemaname,\n/// 'name', bloat_data.object_name,\n/// 'type', bloat_data.type\n/// ) as \"metadata!\",\n/// format(\n/// 'table_bloat_%s_%s',\n/// bloat_data.schemaname,\n/// bloat_data.object_name\n/// ) as \"cache_key!\"\n/// from\n/// bloat_data\n/// where\n/// bloat > 70.0\n/// and raw_waste > (20 * 1024 * 1024) -- filter for waste > 200 MB\n/// order by\n/// schemaname,\n/// object_name)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"tableBloat\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub TableBloat { version : "1.0.0" , name : "tableBloat" , severity : pgls_diagnostics :: Severity :: Information , } } +::pgls_analyse::declare_rule! { # [doc = "# Table Bloat\n\nDetects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.\n\n## SQL Query\n\n```sql\n(\nwith constants as (\n select current_setting('block_size')::numeric as bs, 23 as hdr, 4 as ma\n),\n\nbloat_info as (\n select\n ma,\n bs,\n schemaname,\n tablename,\n (datawidth + (hdr + ma - (case when hdr % ma = 0 then ma else hdr % ma end)))::numeric as datahdr,\n (maxfracsum * (nullhdr + ma - (case when nullhdr % ma = 0 then ma else nullhdr % ma end))) as nullhdr2\n from (\n select\n schemaname,\n tablename,\n hdr,\n ma,\n bs,\n sum((1 - null_frac) * avg_width) as datawidth,\n max(null_frac) as maxfracsum,\n hdr + (\n select 1 + count(*) / 8\n from pg_stats s2\n where\n null_frac <> 0\n and s2.schemaname = s.schemaname\n and s2.tablename = s.tablename\n ) as nullhdr\n from pg_stats s, constants\n group by 1, 2, 3, 4, 5\n ) as foo\n),\n\ntable_bloat as (\n select\n schemaname,\n tablename,\n cc.relpages,\n bs,\n ceil((cc.reltuples * ((datahdr + ma -\n (case when datahdr % ma = 0 then ma else datahdr % 
ma end)) + nullhdr2 + 4)) / (bs - 20::float)) as otta\n from\n bloat_info\n join pg_class cc\n on cc.relname = bloat_info.tablename\n join pg_namespace nn\n on cc.relnamespace = nn.oid\n and nn.nspname = bloat_info.schemaname\n and nn.nspname <> 'information_schema'\n where\n cc.relkind = 'r'\n and cc.relam = (select oid from pg_am where amname = 'heap')\n),\n\nbloat_data as (\n select\n 'table' as type,\n schemaname,\n tablename as object_name,\n round(case when otta = 0 then 0.0 else table_bloat.relpages / otta::numeric end, 1) as bloat,\n case when relpages < otta then 0 else (bs * (table_bloat.relpages - otta)::bigint)::bigint end as raw_waste\n from\n table_bloat\n)\n\nselect\n 'table_bloat' as \"name!\",\n 'Table Bloat' as \"title!\",\n 'INFO' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.' as \"description!\",\n format(\n 'Table `%s`.`%s` has excessive bloat',\n bloat_data.schemaname,\n bloat_data.object_name\n ) as \"detail!\",\n 'Consider running vacuum full (WARNING: incurs downtime) and tweaking autovacuum settings to reduce bloat.' 
as \"remediation!\",\n jsonb_build_object(\n 'schema', bloat_data.schemaname,\n 'name', bloat_data.object_name,\n 'type', bloat_data.type\n ) as \"metadata!\",\n format(\n 'table_bloat_%s_%s',\n bloat_data.schemaname,\n bloat_data.object_name\n ) as \"cache_key!\"\nfrom\n bloat_data\nwhere\n bloat > 70.0\n and raw_waste > (20 * 1024 * 1024) -- filter for waste > 200 MB\norder by\n schemaname,\n object_name)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"tableBloat\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub TableBloat { version : "1.0.0" , name : "tableBloat" , severity : pgls_diagnostics :: Severity :: Information , recommended : true , } } impl SplinterRule for TableBloat { const SQL_FILE_PATH: &'static str = "performance/table_bloat.sql"; const DESCRIPTION: &'static str = "Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster."; diff --git a/crates/pgls_splinter/src/rules/performance/unindexed_foreign_keys.rs b/crates/pgls_splinter/src/rules/performance/unindexed_foreign_keys.rs index c4542cb0f..832757562 100644 --- a/crates/pgls_splinter/src/rules/performance/unindexed_foreign_keys.rs +++ b/crates/pgls_splinter/src/rules/performance/unindexed_foreign_keys.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! 
{ # [doc = "/// # Unindexed foreign keys\n///\n/// Identifies foreign key constraints without a covering index, which can impact database performance.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// with foreign_keys as (\n/// select\n/// cl.relnamespace::regnamespace::text as schema_name,\n/// cl.relname as table_name,\n/// cl.oid as table_oid,\n/// ct.conname as fkey_name,\n/// ct.conkey as col_attnums\n/// from\n/// pg_catalog.pg_constraint ct\n/// join pg_catalog.pg_class cl -- fkey owning table\n/// on ct.conrelid = cl.oid\n/// left join pg_catalog.pg_depend d\n/// on d.objid = cl.oid\n/// and d.deptype = 'e'\n/// where\n/// ct.contype = 'f' -- foreign key constraints\n/// and d.objid is null -- exclude tables that are dependencies of extensions\n/// and cl.relnamespace::regnamespace::text not in (\n/// 'pg_catalog', 'information_schema', 'auth', 'storage', 'vault', 'extensions'\n/// )\n/// ),\n/// index_ as (\n/// select\n/// pi.indrelid as table_oid,\n/// indexrelid::regclass as index_,\n/// string_to_array(indkey::text, ' ')::smallint[] as col_attnums\n/// from\n/// pg_catalog.pg_index pi\n/// where\n/// indisvalid\n/// )\n/// select\n/// 'unindexed_foreign_keys' as \"name!\",\n/// 'Unindexed foreign keys' as \"title!\",\n/// 'INFO' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Identifies foreign key constraints without a covering index, which can impact database performance.' as \"description!\",\n/// format(\n/// 'Table `%s.%s` has a foreign key `%s` without a covering index. 
This can lead to suboptimal query performance.',\n/// fk.schema_name,\n/// fk.table_name,\n/// fk.fkey_name\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0001_unindexed_foreign_keys' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', fk.schema_name,\n/// 'name', fk.table_name,\n/// 'type', 'table',\n/// 'fkey_name', fk.fkey_name,\n/// 'fkey_columns', fk.col_attnums\n/// ) as \"metadata!\",\n/// format('unindexed_foreign_keys_%s_%s_%s', fk.schema_name, fk.table_name, fk.fkey_name) as \"cache_key!\"\n/// from\n/// foreign_keys fk\n/// left join index_ idx\n/// on fk.table_oid = idx.table_oid\n/// and fk.col_attnums = idx.col_attnums[1:array_length(fk.col_attnums, 1)]\n/// left join pg_catalog.pg_depend dep\n/// on idx.table_oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// idx.index_ is null\n/// and fk.schema_name not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null -- exclude tables owned by extensions\n/// order by\n/// fk.schema_name,\n/// fk.table_name,\n/// fk.fkey_name\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"unindexedForeignKeys\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub UnindexedForeignKeys { version : "1.0.0" , name : "unindexedForeignKeys" , severity : pgls_diagnostics :: Severity :: Information , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# Unindexed foreign keys\n\nIdentifies foreign key constraints without a covering index, which can impact database performance.\n\n## SQL Query\n\n```sql\nwith foreign_keys as (\n select\n cl.relnamespace::regnamespace::text as schema_name,\n cl.relname as table_name,\n cl.oid as table_oid,\n ct.conname as fkey_name,\n ct.conkey as col_attnums\n from\n pg_catalog.pg_constraint ct\n join pg_catalog.pg_class cl -- fkey owning table\n on ct.conrelid = cl.oid\n left join pg_catalog.pg_depend d\n on d.objid = cl.oid\n and d.deptype = 'e'\n where\n ct.contype = 'f' -- foreign key constraints\n and d.objid is null -- exclude tables that are dependencies of extensions\n and cl.relnamespace::regnamespace::text not in (\n 'pg_catalog', 'information_schema', 'auth', 'storage', 'vault', 'extensions'\n )\n),\nindex_ as (\n select\n pi.indrelid as table_oid,\n indexrelid::regclass as index_,\n string_to_array(indkey::text, ' ')::smallint[] as col_attnums\n from\n pg_catalog.pg_index pi\n where\n indisvalid\n)\nselect\n 'unindexed_foreign_keys' as \"name!\",\n 'Unindexed foreign keys' as \"title!\",\n 'INFO' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Identifies foreign key constraints without a covering index, which can impact database performance.' as \"description!\",\n format(\n 'Table `%s.%s` has a foreign key `%s` without a covering index. 
This can lead to suboptimal query performance.',\n fk.schema_name,\n fk.table_name,\n fk.fkey_name\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0001_unindexed_foreign_keys' as \"remediation!\",\n jsonb_build_object(\n 'schema', fk.schema_name,\n 'name', fk.table_name,\n 'type', 'table',\n 'fkey_name', fk.fkey_name,\n 'fkey_columns', fk.col_attnums\n ) as \"metadata!\",\n format('unindexed_foreign_keys_%s_%s_%s', fk.schema_name, fk.table_name, fk.fkey_name) as \"cache_key!\"\nfrom\n foreign_keys fk\n left join index_ idx\n on fk.table_oid = idx.table_oid\n and fk.col_attnums = idx.col_attnums[1:array_length(fk.col_attnums, 1)]\n left join pg_catalog.pg_depend dep\n on idx.table_oid = dep.objid\n and dep.deptype = 'e'\nwhere\n idx.index_ is null\n and fk.schema_name not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null -- exclude tables owned by extensions\norder by\n fk.schema_name,\n fk.table_name,\n fk.fkey_name\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"unindexedForeignKeys\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub UnindexedForeignKeys { version : "1.0.0" , name : "unindexedForeignKeys" , severity : pgls_diagnostics :: Severity :: Information , recommended : true , } } impl SplinterRule for UnindexedForeignKeys { const SQL_FILE_PATH: &'static str = "performance/unindexed_foreign_keys.sql"; const DESCRIPTION: &'static str = "Identifies foreign key constraints without a covering index, which can impact database performance."; diff 
--git a/crates/pgls_splinter/src/rules/performance/unused_index.rs b/crates/pgls_splinter/src/rules/performance/unused_index.rs index 3f0c4db67..bc9eed6b1 100644 --- a/crates/pgls_splinter/src/rules/performance/unused_index.rs +++ b/crates/pgls_splinter/src/rules/performance/unused_index.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Unused Index\n///\n/// Detects if an index has never been used and may be a candidate for removal.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'unused_index' as \"name!\",\n/// 'Unused Index' as \"title!\",\n/// 'INFO' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['PERFORMANCE'] as \"categories!\",\n/// 'Detects if an index has never been used and may be a candidate for removal.' as \"description!\",\n/// format(\n/// 'Index \\`%s\\` on table \\`%s.%s\\` has not been used',\n/// psui.indexrelname,\n/// psui.schemaname,\n/// psui.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0005_unused_index' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', psui.schemaname,\n/// 'name', psui.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'unused_index_%s_%s_%s',\n/// psui.schemaname,\n/// psui.relname,\n/// psui.indexrelname\n/// ) as \"cache_key!\"\n/// \n/// from\n/// pg_catalog.pg_stat_user_indexes psui\n/// join pg_catalog.pg_index pi\n/// on psui.indexrelid = pi.indexrelid\n/// left join pg_catalog.pg_depend dep\n/// on psui.relid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// psui.idx_scan = 0\n/// and not pi.indisunique\n/// and not pi.indisprimary\n/// and dep.objid is null -- exclude tables owned by extensions\n/// and psui.schemaname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 
'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"performance\": {\n/// \"unusedIndex\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub UnusedIndex { version : "1.0.0" , name : "unusedIndex" , severity : pgls_diagnostics :: Severity :: Information , } } +::pgls_analyse::declare_rule! { # [doc = "# Unused Index\n\nDetects if an index has never been used and may be a candidate for removal.\n\n## SQL Query\n\n```sql\n(\nselect\n 'unused_index' as \"name!\",\n 'Unused Index' as \"title!\",\n 'INFO' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['PERFORMANCE'] as \"categories!\",\n 'Detects if an index has never been used and may be a candidate for removal.' 
as \"description!\",\n format(\n 'Index \\`%s\\` on table \\`%s.%s\\` has not been used',\n psui.indexrelname,\n psui.schemaname,\n psui.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0005_unused_index' as \"remediation!\",\n jsonb_build_object(\n 'schema', psui.schemaname,\n 'name', psui.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'unused_index_%s_%s_%s',\n psui.schemaname,\n psui.relname,\n psui.indexrelname\n ) as \"cache_key!\"\n\nfrom\n pg_catalog.pg_stat_user_indexes psui\n join pg_catalog.pg_index pi\n on psui.indexrelid = pi.indexrelid\n left join pg_catalog.pg_depend dep\n on psui.relid = dep.objid\n and dep.deptype = 'e'\nwhere\n psui.idx_scan = 0\n and not pi.indisunique\n and not pi.indisprimary\n and dep.objid is null -- exclude tables owned by extensions\n and psui.schemaname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"performance\": {\n \"unusedIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub UnusedIndex { version : "1.0.0" , name : "unusedIndex" , severity : pgls_diagnostics :: Severity :: Information , recommended : true , } } impl SplinterRule for UnusedIndex { const SQL_FILE_PATH: &'static str = "performance/unused_index.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/auth_users_exposed.rs b/crates/pgls_splinter/src/rules/security/auth_users_exposed.rs index 9f0372fdf..eb0eb7692 100644 --- 
a/crates/pgls_splinter/src/rules/security/auth_users_exposed.rs +++ b/crates/pgls_splinter/src/rules/security/auth_users_exposed.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Exposed Auth Users\n///\n/// Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'auth_users_exposed' as \"name!\",\n/// 'Exposed Auth Users' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.' 
as \"description!\",\n/// format(\n/// 'View/Materialized View \"%s\" in the public schema may expose \\`auth.users\\` data to anon or authenticated roles.',\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0002_auth_users_exposed' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'view',\n/// 'exposed_to', array_remove(array_agg(DISTINCT case when pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') then 'anon' when pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') then 'authenticated' end), null)\n/// ) as \"metadata!\",\n/// format('auth_users_exposed_%s_%s', n.nspname, c.relname) as \"cache_key!\"\n/// from\n/// -- Identify the oid for auth.users\n/// pg_catalog.pg_class auth_users_pg_class\n/// join pg_catalog.pg_namespace auth_users_pg_namespace\n/// on auth_users_pg_class.relnamespace = auth_users_pg_namespace.oid\n/// and auth_users_pg_class.relname = 'users'\n/// and auth_users_pg_namespace.nspname = 'auth'\n/// -- Depends on auth.users\n/// join pg_catalog.pg_depend d\n/// on d.refobjid = auth_users_pg_class.oid\n/// join pg_catalog.pg_rewrite r\n/// on r.oid = d.objid\n/// join pg_catalog.pg_class c\n/// on c.oid = r.ev_class\n/// join pg_catalog.pg_namespace n\n/// on n.oid = c.relnamespace\n/// join pg_catalog.pg_class pg_class_auth_users\n/// on d.refobjid = pg_class_auth_users.oid\n/// where\n/// d.deptype = 'n'\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n/// -- Exclude self\n/// and c.relname <> '0002_auth_users_exposed'\n/// -- There are 3 insecure configurations\n/// and\n/// (\n/// -- Materialized views don't support RLS so this is insecure by default\n/// (c.relkind in ('m')) -- m for 
materialized view\n/// or\n/// -- Standard View, accessible to anon or authenticated that is security_definer\n/// (\n/// c.relkind = 'v' -- v for view\n/// -- Exclude security invoker views\n/// and not (\n/// lower(coalesce(c.reloptions::text,'{}'))::text[]\n/// && array[\n/// 'security_invoker=1',\n/// 'security_invoker=true',\n/// 'security_invoker=yes',\n/// 'security_invoker=on'\n/// ]\n/// )\n/// )\n/// or\n/// -- Standard View, security invoker, but no RLS enabled on auth.users\n/// (\n/// c.relkind in ('v') -- v for view\n/// -- is security invoker\n/// and (\n/// lower(coalesce(c.reloptions::text,'{}'))::text[]\n/// && array[\n/// 'security_invoker=1',\n/// 'security_invoker=true',\n/// 'security_invoker=yes',\n/// 'security_invoker=on'\n/// ]\n/// )\n/// and not pg_class_auth_users.relrowsecurity\n/// )\n/// )\n/// group by\n/// n.nspname,\n/// c.relname,\n/// c.oid)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"authUsersExposed\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub AuthUsersExposed { version : "1.0.0" , name : "authUsersExposed" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! { # [doc = "# Exposed Auth Users\n\nDetects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). 
It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'auth_users_exposed' as \"name!\",\n 'Exposed Auth Users' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.' as \"description!\",\n format(\n 'View/Materialized View \"%s\" in the public schema may expose \\`auth.users\\` data to anon or authenticated roles.',\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0002_auth_users_exposed' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'view',\n 'exposed_to', array_remove(array_agg(DISTINCT case when pg_catalog.has_table_privilege('anon', c.oid, 'SELECT') then 'anon' when pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT') then 'authenticated' end), null)\n ) as \"metadata!\",\n format('auth_users_exposed_%s_%s', n.nspname, c.relname) as \"cache_key!\"\nfrom\n -- Identify the oid for auth.users\n pg_catalog.pg_class auth_users_pg_class\n join pg_catalog.pg_namespace auth_users_pg_namespace\n on auth_users_pg_class.relnamespace = auth_users_pg_namespace.oid\n and auth_users_pg_class.relname = 'users'\n and auth_users_pg_namespace.nspname = 'auth'\n -- Depends on auth.users\n join pg_catalog.pg_depend d\n on d.refobjid = auth_users_pg_class.oid\n join pg_catalog.pg_rewrite r\n on r.oid = d.objid\n join pg_catalog.pg_class c\n on c.oid = r.ev_class\n join pg_catalog.pg_namespace n\n on n.oid = c.relnamespace\n join pg_catalog.pg_class pg_class_auth_users\n on d.refobjid = pg_class_auth_users.oid\nwhere\n d.deptype = 'n'\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and 
n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n -- Exclude self\n and c.relname <> '0002_auth_users_exposed'\n -- There are 3 insecure configurations\n and\n (\n -- Materialized views don't support RLS so this is insecure by default\n (c.relkind in ('m')) -- m for materialized view\n or\n -- Standard View, accessible to anon or authenticated that is security_definer\n (\n c.relkind = 'v' -- v for view\n -- Exclude security invoker views\n and not (\n lower(coalesce(c.reloptions::text,'{}'))::text[]\n && array[\n 'security_invoker=1',\n 'security_invoker=true',\n 'security_invoker=yes',\n 'security_invoker=on'\n ]\n )\n )\n or\n -- Standard View, security invoker, but no RLS enabled on auth.users\n (\n c.relkind in ('v') -- v for view\n -- is security invoker\n and (\n lower(coalesce(c.reloptions::text,'{}'))::text[]\n && array[\n 'security_invoker=1',\n 'security_invoker=true',\n 'security_invoker=yes',\n 'security_invoker=on'\n ]\n )\n and not pg_class_auth_users.relrowsecurity\n )\n )\ngroup by\n n.nspname,\n c.relname,\n c.oid)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"authUsersExposed\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub AuthUsersExposed { version : "1.0.0" , name : "authUsersExposed" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for AuthUsersExposed { const SQL_FILE_PATH: &'static str = "security/auth_users_exposed.sql"; const DESCRIPTION: &'static str = "Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security."; diff --git a/crates/pgls_splinter/src/rules/security/extension_in_public.rs b/crates/pgls_splinter/src/rules/security/extension_in_public.rs index 02aa1adec..d40fc4c1a 100644 --- 
a/crates/pgls_splinter/src/rules/security/extension_in_public.rs +++ b/crates/pgls_splinter/src/rules/security/extension_in_public.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Extension in Public\n///\n/// Detects extensions installed in the \\`public\\` schema.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'extension_in_public' as \"name!\",\n/// 'Extension in Public' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects extensions installed in the \\`public\\` schema.' as \"description!\",\n/// format(\n/// 'Extension \\`%s\\` is installed in the public schema. Move it to another schema.',\n/// pe.extname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0014_extension_in_public' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', pe.extnamespace::regnamespace,\n/// 'name', pe.extname,\n/// 'type', 'extension'\n/// ) as \"metadata!\",\n/// format(\n/// 'extension_in_public_%s',\n/// pe.extname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_extension pe\n/// where\n/// -- plpgsql is installed by default in public and outside user control\n/// -- confirmed safe\n/// pe.extname not in ('plpgsql')\n/// -- Scoping this to public is not optimal. Ideally we would use the postgres\n/// -- search path. That currently isn't available via SQL. 
In other lints\n/// -- we have used has_schema_privilege('anon', 'extensions', 'USAGE') but that\n/// -- is not appropriate here as it would evaluate true for the extensions schema\n/// and pe.extnamespace::regnamespace::text = 'public')\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"extensionInPublic\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub ExtensionInPublic { version : "1.0.0" , name : "extensionInPublic" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Extension in Public\n\nDetects extensions installed in the \\`public\\` schema.\n\n## SQL Query\n\n```sql\n(\nselect\n 'extension_in_public' as \"name!\",\n 'Extension in Public' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects extensions installed in the \\`public\\` schema.' as \"description!\",\n format(\n 'Extension \\`%s\\` is installed in the public schema. Move it to another schema.',\n pe.extname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0014_extension_in_public' as \"remediation!\",\n jsonb_build_object(\n 'schema', pe.extnamespace::regnamespace,\n 'name', pe.extname,\n 'type', 'extension'\n ) as \"metadata!\",\n format(\n 'extension_in_public_%s',\n pe.extname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_extension pe\nwhere\n -- plpgsql is installed by default in public and outside user control\n -- confirmed safe\n pe.extname not in ('plpgsql')\n -- Scoping this to public is not optimal. Ideally we would use the postgres\n -- search path. That currently isn't available via SQL. 
In other lints\n -- we have used has_schema_privilege('anon', 'extensions', 'USAGE') but that\n -- is not appropriate here as it would evaluate true for the extensions schema\n and pe.extnamespace::regnamespace::text = 'public')\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"extensionInPublic\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub ExtensionInPublic { version : "1.0.0" , name : "extensionInPublic" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for ExtensionInPublic { const SQL_FILE_PATH: &'static str = "security/extension_in_public.sql"; const DESCRIPTION: &'static str = "Detects extensions installed in the \\`public\\` schema."; diff --git a/crates/pgls_splinter/src/rules/security/extension_versions_outdated.rs b/crates/pgls_splinter/src/rules/security/extension_versions_outdated.rs index 066ba0b9c..7b6d31aac 100644 --- a/crates/pgls_splinter/src/rules/security/extension_versions_outdated.rs +++ b/crates/pgls_splinter/src/rules/security/extension_versions_outdated.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Extension Versions Outdated\n///\n/// Detects extensions that are not using the default (recommended) version.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'extension_versions_outdated' as \"name!\",\n/// 'Extension Versions Outdated' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects extensions that are not using the default (recommended) version.' as \"description!\",\n/// format(\n/// 'Extension `%s` is using version `%s` but version `%s` is available. 
Using outdated extension versions may expose the database to security vulnerabilities.',\n/// ext.name,\n/// ext.installed_version,\n/// ext.default_version\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0022_extension_versions_outdated' as \"remediation!\",\n/// jsonb_build_object(\n/// 'extension_name', ext.name,\n/// 'installed_version', ext.installed_version,\n/// 'default_version', ext.default_version\n/// ) as \"metadata!\",\n/// format(\n/// 'extension_versions_outdated_%s_%s',\n/// ext.name,\n/// ext.installed_version\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_available_extensions ext\n/// join\n/// -- ignore versions not in pg_available_extension_versions\n/// -- e.g. residue of pg_upgrade\n/// pg_catalog.pg_available_extension_versions extv\n/// on extv.name = ext.name and extv.installed\n/// where\n/// ext.installed_version is not null\n/// and ext.default_version is not null\n/// and ext.installed_version != ext.default_version\n/// order by\n/// ext.name)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"extensionVersionsOutdated\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub ExtensionVersionsOutdated { version : "1.0.0" , name : "extensionVersionsOutdated" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Extension Versions Outdated\n\nDetects extensions that are not using the default (recommended) version.\n\n## SQL Query\n\n```sql\n(\nselect\n 'extension_versions_outdated' as \"name!\",\n 'Extension Versions Outdated' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects extensions that are not using the default (recommended) version.' 
as \"description!\",\n format(\n 'Extension `%s` is using version `%s` but version `%s` is available. Using outdated extension versions may expose the database to security vulnerabilities.',\n ext.name,\n ext.installed_version,\n ext.default_version\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0022_extension_versions_outdated' as \"remediation!\",\n jsonb_build_object(\n 'extension_name', ext.name,\n 'installed_version', ext.installed_version,\n 'default_version', ext.default_version\n ) as \"metadata!\",\n format(\n 'extension_versions_outdated_%s_%s',\n ext.name,\n ext.installed_version\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_available_extensions ext\njoin\n -- ignore versions not in pg_available_extension_versions\n -- e.g. residue of pg_upgrade\n pg_catalog.pg_available_extension_versions extv\n on extv.name = ext.name and extv.installed\nwhere\n ext.installed_version is not null\n and ext.default_version is not null\n and ext.installed_version != ext.default_version\norder by\n ext.name)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"extensionVersionsOutdated\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub ExtensionVersionsOutdated { version : "1.0.0" , name : "extensionVersionsOutdated" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for ExtensionVersionsOutdated { const SQL_FILE_PATH: &'static str = "security/extension_versions_outdated.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/fkey_to_auth_unique.rs b/crates/pgls_splinter/src/rules/security/fkey_to_auth_unique.rs index 53dd84031..80cfac9d2 100644 --- a/crates/pgls_splinter/src/rules/security/fkey_to_auth_unique.rs +++ b/crates/pgls_splinter/src/rules/security/fkey_to_auth_unique.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, 
see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Foreign Key to Auth Unique Constraint\n///\n/// Detects user defined foreign keys to unique constraints in the auth schema.\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'fkey_to_auth_unique' as \"name!\",\n/// 'Foreign Key to Auth Unique Constraint' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects user defined foreign keys to unique constraints in the auth schema.' as \"description!\",\n/// format(\n/// 'Table `%s`.`%s` has a foreign key `%s` referencing an auth unique constraint',\n/// n.nspname, -- referencing schema\n/// c_rel.relname, -- referencing table\n/// c.conname -- fkey name\n/// ) as \"detail!\",\n/// 'Drop the foreign key constraint that references the auth schema.' 
as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c_rel.relname,\n/// 'foreign_key', c.conname\n/// ) as \"metadata!\",\n/// format(\n/// 'fkey_to_auth_unique_%s_%s_%s',\n/// n.nspname, -- referencing schema\n/// c_rel.relname, -- referencing table\n/// c.conname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_constraint c\n/// join pg_catalog.pg_class c_rel\n/// on c.conrelid = c_rel.oid\n/// join pg_catalog.pg_namespace n\n/// on c_rel.relnamespace = n.oid\n/// join pg_catalog.pg_class ref_rel\n/// on c.confrelid = ref_rel.oid\n/// join pg_catalog.pg_namespace cn\n/// on ref_rel.relnamespace = cn.oid\n/// join pg_catalog.pg_index i\n/// on c.conindid = i.indexrelid\n/// where c.contype = 'f'\n/// and cn.nspname = 'auth'\n/// and i.indisunique\n/// and not i.indisprimary)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"fkeyToAuthUnique\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub FkeyToAuthUnique { version : "1.0.0" , name : "fkeyToAuthUnique" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! { # [doc = "# Foreign Key to Auth Unique Constraint\n\nDetects user defined foreign keys to unique constraints in the auth schema.\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'fkey_to_auth_unique' as \"name!\",\n 'Foreign Key to Auth Unique Constraint' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects user defined foreign keys to unique constraints in the auth schema.' 
as \"description!\",\n format(\n 'Table `%s`.`%s` has a foreign key `%s` referencing an auth unique constraint',\n n.nspname, -- referencing schema\n c_rel.relname, -- referencing table\n c.conname -- fkey name\n ) as \"detail!\",\n 'Drop the foreign key constraint that references the auth schema.' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c_rel.relname,\n 'foreign_key', c.conname\n ) as \"metadata!\",\n format(\n 'fkey_to_auth_unique_%s_%s_%s',\n n.nspname, -- referencing schema\n c_rel.relname, -- referencing table\n c.conname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_constraint c\n join pg_catalog.pg_class c_rel\n on c.conrelid = c_rel.oid\n join pg_catalog.pg_namespace n\n on c_rel.relnamespace = n.oid\n join pg_catalog.pg_class ref_rel\n on c.confrelid = ref_rel.oid\n join pg_catalog.pg_namespace cn\n on ref_rel.relnamespace = cn.oid\n join pg_catalog.pg_index i\n on c.conindid = i.indexrelid\nwhere c.contype = 'f'\n and cn.nspname = 'auth'\n and i.indisunique\n and not i.indisprimary)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"fkeyToAuthUnique\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub FkeyToAuthUnique { version : "1.0.0" , name : "fkeyToAuthUnique" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for FkeyToAuthUnique { const SQL_FILE_PATH: &'static str = "security/fkey_to_auth_unique.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/foreign_table_in_api.rs b/crates/pgls_splinter/src/rules/security/foreign_table_in_api.rs index 808b07742..a9d5fd704 100644 --- a/crates/pgls_splinter/src/rules/security/foreign_table_in_api.rs +++ b/crates/pgls_splinter/src/rules/security/foreign_table_in_api.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; 
-::pgls_analyse::declare_rule! { # [doc = "/// # Foreign Table in API\n///\n/// Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'foreign_table_in_api' as \"name!\",\n/// 'Foreign Table in API' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.' as \"description!\",\n/// format(\n/// 'Foreign table \\`%s.%s\\` is accessible over APIs',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0017_foreign_table_in_api' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'foreign table'\n/// ) as \"metadata!\",\n/// format(\n/// 'foreign_table_in_api_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// join pg_catalog.pg_namespace n\n/// on n.oid = c.relnamespace\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind = 'f'\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 
'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"foreignTableInApi\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub ForeignTableInApi { version : "1.0.0" , name : "foreignTableInApi" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! { # [doc = "# Foreign Table in API\n\nDetects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'foreign_table_in_api' as \"name!\",\n 'Foreign Table in API' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.' 
as \"description!\",\n format(\n 'Foreign table \\`%s.%s\\` is accessible over APIs',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0017_foreign_table_in_api' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'foreign table'\n ) as \"metadata!\",\n format(\n 'foreign_table_in_api_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n\n on n.oid = c.relnamespace\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind = 'f'\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"foreignTableInApi\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub ForeignTableInApi { version : "1.0.0" , name : "foreignTableInApi" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for ForeignTableInApi { const SQL_FILE_PATH: &'static str = "security/foreign_table_in_api.sql"; const DESCRIPTION: &'static str = "Detects foreign tables that are accessible over APIs. 
Foreign tables do not respect row level security policies."; diff --git a/crates/pgls_splinter/src/rules/security/function_search_path_mutable.rs b/crates/pgls_splinter/src/rules/security/function_search_path_mutable.rs index 6ea976151..fab4776c2 100644 --- a/crates/pgls_splinter/src/rules/security/function_search_path_mutable.rs +++ b/crates/pgls_splinter/src/rules/security/function_search_path_mutable.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Function Search Path Mutable\n///\n/// Detects functions where the search_path parameter is not set.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'function_search_path_mutable' as \"name!\",\n/// 'Function Search Path Mutable' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects functions where the search_path parameter is not set.' 
as \"description!\",\n/// format(\n/// 'Function \\`%s.%s\\` has a role mutable search_path',\n/// n.nspname,\n/// p.proname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0011_function_search_path_mutable' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', p.proname,\n/// 'type', 'function'\n/// ) as \"metadata!\",\n/// format(\n/// 'function_search_path_mutable_%s_%s_%s',\n/// n.nspname,\n/// p.proname,\n/// md5(p.prosrc) -- required when function is polymorphic\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_proc p\n/// join pg_catalog.pg_namespace n\n/// on p.pronamespace = n.oid\n/// left join pg_catalog.pg_depend dep\n/// on p.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null -- exclude functions owned by extensions\n/// -- Search path not set\n/// and not exists (\n/// select 1\n/// from unnest(coalesce(p.proconfig, '{}')) as config\n/// where config like 'search_path=%'\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"functionSearchPathMutable\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub FunctionSearchPathMutable { version : "1.0.0" , name : "functionSearchPathMutable" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# Function Search Path Mutable\n\nDetects functions where the search_path parameter is not set.\n\n## SQL Query\n\n```sql\n(\nselect\n 'function_search_path_mutable' as \"name!\",\n 'Function Search Path Mutable' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects functions where the search_path parameter is not set.' as \"description!\",\n format(\n 'Function \\`%s.%s\\` has a role mutable search_path',\n n.nspname,\n p.proname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0011_function_search_path_mutable' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', p.proname,\n 'type', 'function'\n ) as \"metadata!\",\n format(\n 'function_search_path_mutable_%s_%s_%s',\n n.nspname,\n p.proname,\n md5(p.prosrc) -- required when function is polymorphic\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_proc p\n join pg_catalog.pg_namespace n\n on p.pronamespace = n.oid\n left join pg_catalog.pg_depend dep\n on p.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null -- exclude functions owned by extensions\n -- Search path not set\n and not exists (\n select 1\n from unnest(coalesce(p.proconfig, '{}')) as config\n where config like 'search_path=%'\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"functionSearchPathMutable\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub FunctionSearchPathMutable { 
version : "1.0.0" , name : "functionSearchPathMutable" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for FunctionSearchPathMutable { const SQL_FILE_PATH: &'static str = "security/function_search_path_mutable.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/insecure_queue_exposed_in_api.rs b/crates/pgls_splinter/src/rules/security/insecure_queue_exposed_in_api.rs index a739cb138..d347abff6 100644 --- a/crates/pgls_splinter/src/rules/security/insecure_queue_exposed_in_api.rs +++ b/crates/pgls_splinter/src/rules/security/insecure_queue_exposed_in_api.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Insecure Queue Exposed in API\n///\n/// Detects cases where an insecure Queue is exposed over Data APIs\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). 
\n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'insecure_queue_exposed_in_api' as \"name!\",\n/// 'Insecure Queue Exposed in API' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects cases where an insecure Queue is exposed over Data APIs' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` is public, but RLS has not been enabled.',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0019_insecure_queue_exposed_in_api' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'rls_disabled_in_public_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// where\n/// c.relkind in ('r', 'I') -- regular or partitioned tables\n/// and not c.relrowsecurity -- RLS is disabled\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and n.nspname = 'pgmq' -- tables in the pgmq schema\n/// and c.relname like 'q_%' -- only queue tables\n/// -- Constant requirements\n/// and 'pgmq_public' = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"insecureQueueExposedInApi\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub InsecureQueueExposedInApi { version : "1.0.0" , name : "insecureQueueExposedInApi" , severity : pgls_diagnostics :: 
Severity :: Error , } } +::pgls_analyse::declare_rule! { # [doc = "# Insecure Queue Exposed in API\n\nDetects cases where an insecure Queue is exposed over Data APIs\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'insecure_queue_exposed_in_api' as \"name!\",\n 'Insecure Queue Exposed in API' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects cases where an insecure Queue is exposed over Data APIs' as \"description!\",\n format(\n 'Table \\`%s.%s\\` is public, but RLS has not been enabled.',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0019_insecure_queue_exposed_in_api' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'rls_disabled_in_public_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\nwhere\n c.relkind in ('r', 'I') -- regular or partitioned tables\n and not c.relrowsecurity -- RLS is disabled\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and n.nspname = 'pgmq' -- tables in the pgmq schema\n and c.relname like 'q_%' -- only queue tables\n -- Constant requirements\n and 'pgmq_public' = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ','))))))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"insecureQueueExposedInApi\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub InsecureQueueExposedInApi { version : "1.0.0" , name : 
"insecureQueueExposedInApi" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for InsecureQueueExposedInApi { const SQL_FILE_PATH: &'static str = "security/insecure_queue_exposed_in_api.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/materialized_view_in_api.rs b/crates/pgls_splinter/src/rules/security/materialized_view_in_api.rs index b659eafd5..d86bcda27 100644 --- a/crates/pgls_splinter/src/rules/security/materialized_view_in_api.rs +++ b/crates/pgls_splinter/src/rules/security/materialized_view_in_api.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Materialized View in API\n///\n/// Detects materialized views that are accessible over the Data APIs.\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'materialized_view_in_api' as \"name!\",\n/// 'Materialized View in API' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects materialized views that are accessible over the Data APIs.' 
as \"description!\",\n/// format(\n/// 'Materialized view \\`%s.%s\\` is selectable by anon or authenticated roles',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0016_materialized_view_in_api' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'materialized view'\n/// ) as \"metadata!\",\n/// format(\n/// 'materialized_view_in_api_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// join pg_catalog.pg_namespace n\n/// on n.oid = c.relnamespace\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind = 'm'\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"materializedViewInApi\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub MaterializedViewInApi { version : "1.0.0" , name : "materializedViewInApi" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# Materialized View in API\n\nDetects materialized views that are accessible over the Data APIs.\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'materialized_view_in_api' as \"name!\",\n 'Materialized View in API' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects materialized views that are accessible over the Data APIs.' as \"description!\",\n format(\n 'Materialized view \\`%s.%s\\` is selectable by anon or authenticated roles',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0016_materialized_view_in_api' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'materialized view'\n ) as \"metadata!\",\n format(\n 'materialized_view_in_api_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n\n on n.oid = c.relnamespace\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind = 'm'\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null)\n```\n\n## Configuration\n\nEnable or disable this rule 
in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"materializedViewInApi\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub MaterializedViewInApi { version : "1.0.0" , name : "materializedViewInApi" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for MaterializedViewInApi { const SQL_FILE_PATH: &'static str = "security/materialized_view_in_api.sql"; const DESCRIPTION: &'static str = diff --git a/crates/pgls_splinter/src/rules/security/policy_exists_rls_disabled.rs b/crates/pgls_splinter/src/rules/security/policy_exists_rls_disabled.rs index 60cfc98a2..88429c992 100644 --- a/crates/pgls_splinter/src/rules/security/policy_exists_rls_disabled.rs +++ b/crates/pgls_splinter/src/rules/security/policy_exists_rls_disabled.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Policy Exists RLS Disabled\n///\n/// Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'policy_exists_rls_disabled' as \"name!\",\n/// 'Policy Exists RLS Disabled' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has RLS policies but RLS is not enabled on the table. 
Policies include %s.',\n/// n.nspname,\n/// c.relname,\n/// array_agg(p.polname order by p.polname)\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0007_policy_exists_rls_disabled' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'policy_exists_rls_disabled_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_policy p\n/// join pg_catalog.pg_class c\n/// on p.polrelid = c.oid\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind = 'r' -- regular tables\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// -- RLS is disabled\n/// and not c.relrowsecurity\n/// and dep.objid is null -- exclude tables owned by extensions\n/// group by\n/// n.nspname,\n/// c.relname)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"policyExistsRlsDisabled\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub PolicyExistsRlsDisabled { version : "1.0.0" , name : "policyExistsRlsDisabled" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# Policy Exists RLS Disabled\n\nDetects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.\n\n## SQL Query\n\n```sql\n(\nselect\n 'policy_exists_rls_disabled' as \"name!\",\n 'Policy Exists RLS Disabled' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has RLS policies but RLS is not enabled on the table. Policies include %s.',\n n.nspname,\n c.relname,\n array_agg(p.polname order by p.polname)\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0007_policy_exists_rls_disabled' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'policy_exists_rls_disabled_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_policy p\n join pg_catalog.pg_class c\n on p.polrelid = c.oid\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind = 'r' -- regular tables\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n -- RLS is disabled\n and not c.relrowsecurity\n and dep.objid is null -- exclude tables owned by extensions\ngroup by\n n.nspname,\n c.relname)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n 
\"splinter\": {\n \"rules\": {\n \"security\": {\n \"policyExistsRlsDisabled\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub PolicyExistsRlsDisabled { version : "1.0.0" , name : "policyExistsRlsDisabled" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for PolicyExistsRlsDisabled { const SQL_FILE_PATH: &'static str = "security/policy_exists_rls_disabled.sql"; const DESCRIPTION: &'static str = "Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table."; diff --git a/crates/pgls_splinter/src/rules/security/rls_disabled_in_public.rs b/crates/pgls_splinter/src/rules/security/rls_disabled_in_public.rs index b441f4619..8afdd604b 100644 --- a/crates/pgls_splinter/src/rules/security/rls_disabled_in_public.rs +++ b/crates/pgls_splinter/src/rules/security/rls_disabled_in_public.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # RLS Disabled in Public\n///\n/// Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). 
\n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'rls_disabled_in_public' as \"name!\",\n/// 'RLS Disabled in Public' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` is public, but RLS has not been enabled.',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0013_rls_disabled_in_public' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'rls_disabled_in_public_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// where\n/// c.relkind = 'r' -- regular tables\n/// -- RLS is disabled\n/// and not c.relrowsecurity\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": 
{\n/// \"rules\": {\n/// \"security\": {\n/// \"rlsDisabledInPublic\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub RlsDisabledInPublic { version : "1.0.0" , name : "rlsDisabledInPublic" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! { # [doc = "# RLS Disabled in Public\n\nDetects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'rls_disabled_in_public' as \"name!\",\n 'RLS Disabled in Public' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST' as \"description!\",\n format(\n 'Table \\`%s.%s\\` is public, but RLS has not been enabled.',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0013_rls_disabled_in_public' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'rls_disabled_in_public_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\nwhere\n c.relkind = 'r' -- regular tables\n -- RLS is disabled\n and not c.relrowsecurity\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 
'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"rlsDisabledInPublic\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub RlsDisabledInPublic { version : "1.0.0" , name : "rlsDisabledInPublic" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for RlsDisabledInPublic { const SQL_FILE_PATH: &'static str = "security/rls_disabled_in_public.sql"; const DESCRIPTION: &'static str = "Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST"; diff --git a/crates/pgls_splinter/src/rules/security/rls_enabled_no_policy.rs b/crates/pgls_splinter/src/rules/security/rls_enabled_no_policy.rs index 1f58b7789..b2b4af427 100644 --- a/crates/pgls_splinter/src/rules/security/rls_enabled_no_policy.rs +++ b/crates/pgls_splinter/src/rules/security/rls_enabled_no_policy.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # RLS Enabled No Policy\n///\n/// Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'rls_enabled_no_policy' as \"name!\",\n/// 'RLS Enabled No Policy' as \"title!\",\n/// 'INFO' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.' 
as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has RLS enabled, but no policies exist',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0008_rls_enabled_no_policy' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'rls_enabled_no_policy_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// left join pg_catalog.pg_policy p\n/// on p.polrelid = c.oid\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind = 'r' -- regular tables\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// -- RLS is enabled\n/// and c.relrowsecurity\n/// and p.polname is null\n/// and dep.objid is null -- exclude tables owned by extensions\n/// group by\n/// n.nspname,\n/// c.relname)\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"rlsEnabledNoPolicy\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub RlsEnabledNoPolicy { version : "1.0.0" , name : "rlsEnabledNoPolicy" , severity : pgls_diagnostics :: Severity :: Information , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# RLS Enabled No Policy\n\nDetects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.\n\n## SQL Query\n\n```sql\n(\nselect\n 'rls_enabled_no_policy' as \"name!\",\n 'RLS Enabled No Policy' as \"title!\",\n 'INFO' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has RLS enabled, but no policies exist',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0008_rls_enabled_no_policy' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'rls_enabled_no_policy_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n left join pg_catalog.pg_policy p\n on p.polrelid = c.oid\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind = 'r' -- regular tables\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n -- RLS is enabled\n and c.relrowsecurity\n and p.polname is null\n and dep.objid is null -- exclude tables owned by extensions\ngroup by\n n.nspname,\n c.relname)\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"rlsEnabledNoPolicy\": \"warn\"\n }\n }\n }\n}\n```\n\n## 
Remediation\n\nSee: "] pub RlsEnabledNoPolicy { version : "1.0.0" , name : "rlsEnabledNoPolicy" , severity : pgls_diagnostics :: Severity :: Information , recommended : true , } } impl SplinterRule for RlsEnabledNoPolicy { const SQL_FILE_PATH: &'static str = "security/rls_enabled_no_policy.sql"; const DESCRIPTION: &'static str = "Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created."; diff --git a/crates/pgls_splinter/src/rules/security/rls_references_user_metadata.rs b/crates/pgls_splinter/src/rules/security/rls_references_user_metadata.rs index a43c1dcf8..39b43f850 100644 --- a/crates/pgls_splinter/src/rules/security/rls_references_user_metadata.rs +++ b/crates/pgls_splinter/src/rules/security/rls_references_user_metadata.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # RLS references user metadata\n///\n/// Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). 
\n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// with policies as (\n/// select\n/// nsp.nspname as schema_name,\n/// pb.tablename as table_name,\n/// polname as policy_name,\n/// qual,\n/// with_check\n/// from\n/// pg_catalog.pg_policy pa\n/// join pg_catalog.pg_class pc\n/// on pa.polrelid = pc.oid\n/// join pg_catalog.pg_namespace nsp\n/// on pc.relnamespace = nsp.oid\n/// join pg_catalog.pg_policies pb\n/// on pc.relname = pb.tablename\n/// and nsp.nspname = pb.schemaname\n/// and pa.polname = pb.policyname\n/// )\n/// select\n/// 'rls_references_user_metadata' as \"name!\",\n/// 'RLS references user metadata' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.' as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that references Supabase Auth \\`user_metadata\\`. 
\\`user_metadata\\` is editable by end users and should never be used in a security context.',\n/// schema_name,\n/// table_name,\n/// policy_name\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0015_rls_references_user_metadata' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', schema_name,\n/// 'name', table_name,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format('rls_references_user_metadata_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\"\n/// from\n/// policies\n/// where\n/// schema_name not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and (\n/// -- Example: auth.jwt() -> 'user_metadata'\n/// -- False positives are possible, but it isn't practical to string match\n/// -- If false positive rate is too high, this expression can iterate\n/// qual like '%auth.jwt()%user_metadata%'\n/// or qual like '%current_setting(%request.jwt.claims%)%user_metadata%'\n/// or with_check like '%auth.jwt()%user_metadata%'\n/// or with_check like '%current_setting(%request.jwt.claims%)%user_metadata%'\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"rlsReferencesUserMetadata\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub RlsReferencesUserMetadata { version : "1.0.0" , name : "rlsReferencesUserMetadata" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# RLS references user metadata\n\nDetects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nwith policies as (\n select\n nsp.nspname as schema_name,\n pb.tablename as table_name,\n polname as policy_name,\n qual,\n with_check\n from\n pg_catalog.pg_policy pa\n join pg_catalog.pg_class pc\n on pa.polrelid = pc.oid\n join pg_catalog.pg_namespace nsp\n on pc.relnamespace = nsp.oid\n join pg_catalog.pg_policies pb\n on pc.relname = pb.tablename\n and nsp.nspname = pb.schemaname\n and pa.polname = pb.policyname\n)\nselect\n 'rls_references_user_metadata' as \"name!\",\n 'RLS references user metadata' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has a row level security policy \\`%s\\` that references Supabase Auth \\`user_metadata\\`. 
\\`user_metadata\\` is editable by end users and should never be used in a security context.',\n schema_name,\n table_name,\n policy_name\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0015_rls_references_user_metadata' as \"remediation!\",\n jsonb_build_object(\n 'schema', schema_name,\n 'name', table_name,\n 'type', 'table'\n ) as \"metadata!\",\n format('rls_references_user_metadata_%s_%s_%s', schema_name, table_name, policy_name) as \"cache_key!\"\nfrom\n policies\nwhere\n schema_name not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and (\n -- Example: auth.jwt() -> 'user_metadata'\n -- False positives are possible, but it isn't practical to string match\n -- If false positive rate is too high, this expression can iterate\n qual like '%auth.jwt()%user_metadata%'\n or qual like '%current_setting(%request.jwt.claims%)%user_metadata%'\n or with_check like '%auth.jwt()%user_metadata%'\n or with_check like '%current_setting(%request.jwt.claims%)%user_metadata%'\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"rlsReferencesUserMetadata\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub RlsReferencesUserMetadata { version : "1.0.0" , name : "rlsReferencesUserMetadata" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for RlsReferencesUserMetadata { const SQL_FILE_PATH: &'static str = "security/rls_references_user_metadata.sql"; const DESCRIPTION: &'static str = "Detects when Supabase Auth user_metadata is referenced 
insecurely in a row level security (RLS) policy."; diff --git a/crates/pgls_splinter/src/rules/security/security_definer_view.rs b/crates/pgls_splinter/src/rules/security/security_definer_view.rs index f36b73670..e8f0f82ad 100644 --- a/crates/pgls_splinter/src/rules/security/security_definer_view.rs +++ b/crates/pgls_splinter/src/rules/security/security_definer_view.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Security Definer View\n///\n/// Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'security_definer_view' as \"name!\",\n/// 'Security Definer View' as \"title!\",\n/// 'ERROR' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Detects views defined with the SECURITY DEFINER property. 
These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user' as \"description!\",\n/// format(\n/// 'View \\`%s.%s\\` is defined with the SECURITY DEFINER property',\n/// n.nspname,\n/// c.relname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=0010_security_definer_view' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'type', 'view'\n/// ) as \"metadata!\",\n/// format(\n/// 'security_definer_view_%s_%s',\n/// n.nspname,\n/// c.relname\n/// ) as \"cache_key!\"\n/// from\n/// pg_catalog.pg_class c\n/// join pg_catalog.pg_namespace n\n/// on n.oid = c.relnamespace\n/// left join pg_catalog.pg_depend dep\n/// on c.oid = dep.objid\n/// and dep.deptype = 'e'\n/// where\n/// c.relkind = 'v'\n/// and (\n/// pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n/// or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n/// )\n/// and substring(pg_catalog.version() from 'PostgreSQL ([0-9]+)') >= '15' -- security invoker was added in pg15\n/// and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n/// and n.nspname not in (\n/// '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n/// )\n/// and dep.objid is null -- exclude views owned by extensions\n/// and not (\n/// lower(coalesce(c.reloptions::text,'{}'))::text[]\n/// && array[\n/// 'security_invoker=1',\n/// 'security_invoker=true',\n/// 'security_invoker=yes',\n/// 'security_invoker=on'\n/// ]\n/// ))\n/// ```\n///\n/// ## Configuration\n///\n/// 
Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"securityDefinerView\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub SecurityDefinerView { version : "1.0.0" , name : "securityDefinerView" , severity : pgls_diagnostics :: Severity :: Error , } } +::pgls_analyse::declare_rule! { # [doc = "# Security Definer View\n\nDetects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.\n\n## SQL Query\n\n```sql\n(\nselect\n 'security_definer_view' as \"name!\",\n 'Security Definer View' as \"title!\",\n 'ERROR' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Detects views defined with the SECURITY DEFINER property. 
These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user' as \"description!\",\n format(\n 'View \\`%s.%s\\` is defined with the SECURITY DEFINER property',\n n.nspname,\n c.relname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=0010_security_definer_view' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'type', 'view'\n ) as \"metadata!\",\n format(\n 'security_definer_view_%s_%s',\n n.nspname,\n c.relname\n ) as \"cache_key!\"\nfrom\n pg_catalog.pg_class c\n join pg_catalog.pg_namespace n\n on n.oid = c.relnamespace\n left join pg_catalog.pg_depend dep\n on c.oid = dep.objid\n and dep.deptype = 'e'\nwhere\n c.relkind = 'v'\n and (\n pg_catalog.has_table_privilege('anon', c.oid, 'SELECT')\n or pg_catalog.has_table_privilege('authenticated', c.oid, 'SELECT')\n )\n and substring(pg_catalog.version() from 'PostgreSQL ([0-9]+)') >= '15' -- security invoker was added in pg15\n and n.nspname = any(array(select trim(unnest(string_to_array(current_setting('pgrst.db_schemas', 't'), ',')))))\n and n.nspname not in (\n '_timescaledb_cache', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', 'auth', 'cron', 'extensions', 'graphql', 'graphql_public', 'information_schema', 'net', 'pgmq', 'pgroonga', 'pgsodium', 'pgsodium_masks', 'pgtle', 'pgbouncer', 'pg_catalog', 'pgtle', 'realtime', 'repack', 'storage', 'supabase_functions', 'supabase_migrations', 'tiger', 'topology', 'vault'\n )\n and dep.objid is null -- exclude views owned by extensions\n and not (\n lower(coalesce(c.reloptions::text,'{}'))::text[]\n && array[\n 'security_invoker=1',\n 'security_invoker=true',\n 'security_invoker=yes',\n 'security_invoker=on'\n ]\n ))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"securityDefinerView\": 
\"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub SecurityDefinerView { version : "1.0.0" , name : "securityDefinerView" , severity : pgls_diagnostics :: Severity :: Error , recommended : true , } } impl SplinterRule for SecurityDefinerView { const SQL_FILE_PATH: &'static str = "security/security_definer_view.sql"; const DESCRIPTION: &'static str = "Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user"; diff --git a/crates/pgls_splinter/src/rules/security/unsupported_reg_types.rs b/crates/pgls_splinter/src/rules/security/unsupported_reg_types.rs index 0d1008df1..8068b4f50 100644 --- a/crates/pgls_splinter/src/rules/security/unsupported_reg_types.rs +++ b/crates/pgls_splinter/src/rules/security/unsupported_reg_types.rs @@ -2,7 +2,7 @@ #![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] use crate::rule::SplinterRule; -::pgls_analyse::declare_rule! { # [doc = "/// # Unsupported reg types\n///\n/// Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.\n///\n/// ## SQL Query\n///\n/// ```sql\n/// (\n/// select\n/// 'unsupported_reg_types' as \"name!\",\n/// 'Unsupported reg types' as \"title!\",\n/// 'WARN' as \"level!\",\n/// 'EXTERNAL' as \"facing!\",\n/// array['SECURITY'] as \"categories!\",\n/// 'Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.' 
as \"description!\",\n/// format(\n/// 'Table \\`%s.%s\\` has a column \\`%s\\` with unsupported reg* type \\`%s\\`.',\n/// n.nspname,\n/// c.relname,\n/// a.attname,\n/// t.typname\n/// ) as \"detail!\",\n/// 'https://supabase.com/docs/guides/database/database-linter?lint=unsupported_reg_types' as \"remediation!\",\n/// jsonb_build_object(\n/// 'schema', n.nspname,\n/// 'name', c.relname,\n/// 'column', a.attname,\n/// 'type', 'table'\n/// ) as \"metadata!\",\n/// format(\n/// 'unsupported_reg_types_%s_%s_%s',\n/// n.nspname,\n/// c.relname,\n/// a.attname\n/// ) AS cache_key\n/// from\n/// pg_catalog.pg_attribute a\n/// join pg_catalog.pg_class c\n/// on a.attrelid = c.oid\n/// join pg_catalog.pg_namespace n\n/// on c.relnamespace = n.oid\n/// join pg_catalog.pg_type t\n/// on a.atttypid = t.oid\n/// join pg_catalog.pg_namespace tn\n/// on t.typnamespace = tn.oid\n/// where\n/// tn.nspname = 'pg_catalog'\n/// and t.typname in ('regcollation', 'regconfig', 'regdictionary', 'regnamespace', 'regoper', 'regoperator', 'regproc', 'regprocedure')\n/// and n.nspname not in ('pg_catalog', 'information_schema', 'pgsodium'))\n/// ```\n///\n/// ## Configuration\n///\n/// Enable or disable this rule in your configuration:\n///\n/// ```json\n/// {\n/// \"splinter\": {\n/// \"rules\": {\n/// \"security\": {\n/// \"unsupportedRegTypes\": \"warn\"\n/// }\n/// }\n/// }\n/// }\n/// ```\n///\n/// ## Remediation\n///\n/// See: "] pub UnsupportedRegTypes { version : "1.0.0" , name : "unsupportedRegTypes" , severity : pgls_diagnostics :: Severity :: Warning , } } +::pgls_analyse::declare_rule! 
{ # [doc = "# Unsupported reg types\n\nIdentifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.\n\n## SQL Query\n\n```sql\n(\nselect\n 'unsupported_reg_types' as \"name!\",\n 'Unsupported reg types' as \"title!\",\n 'WARN' as \"level!\",\n 'EXTERNAL' as \"facing!\",\n array['SECURITY'] as \"categories!\",\n 'Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.' as \"description!\",\n format(\n 'Table \\`%s.%s\\` has a column \\`%s\\` with unsupported reg* type \\`%s\\`.',\n n.nspname,\n c.relname,\n a.attname,\n t.typname\n ) as \"detail!\",\n 'https://supabase.com/docs/guides/database/database-linter?lint=unsupported_reg_types' as \"remediation!\",\n jsonb_build_object(\n 'schema', n.nspname,\n 'name', c.relname,\n 'column', a.attname,\n 'type', 'table'\n ) as \"metadata!\",\n format(\n 'unsupported_reg_types_%s_%s_%s',\n n.nspname,\n c.relname,\n a.attname\n ) AS cache_key\nfrom\n pg_catalog.pg_attribute a\n join pg_catalog.pg_class c\n on a.attrelid = c.oid\n join pg_catalog.pg_namespace n\n on c.relnamespace = n.oid\n join pg_catalog.pg_type t\n on a.atttypid = t.oid\n join pg_catalog.pg_namespace tn\n on t.typnamespace = tn.oid\nwhere\n tn.nspname = 'pg_catalog'\n and t.typname in ('regcollation', 'regconfig', 'regdictionary', 'regnamespace', 'regoper', 'regoperator', 'regproc', 'regprocedure')\n and n.nspname not in ('pg_catalog', 'information_schema', 'pgsodium'))\n```\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"splinter\": {\n \"rules\": {\n \"security\": {\n \"unsupportedRegTypes\": \"warn\"\n }\n }\n }\n}\n```\n\n## Remediation\n\nSee: "] pub UnsupportedRegTypes { version : "1.0.0" , name : "unsupportedRegTypes" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } impl SplinterRule for UnsupportedRegTypes { const SQL_FILE_PATH: &'static str = 
"security/unsupported_reg_types.sql"; const DESCRIPTION: &'static str = "Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade."; diff --git a/crates/pgls_workspace/Cargo.toml b/crates/pgls_workspace/Cargo.toml index 5264208f5..c90faa1a8 100644 --- a/crates/pgls_workspace/Cargo.toml +++ b/crates/pgls_workspace/Cargo.toml @@ -32,6 +32,7 @@ pgls_plpgsql_check = { workspace = true } pgls_query = { workspace = true } pgls_query_ext = { workspace = true } pgls_schema_cache = { workspace = true } +pgls_splinter = { workspace = true } pgls_statement_splitter = { workspace = true } pgls_suppressions = { workspace = true } pgls_text_size.workspace = true diff --git a/crates/pgls_workspace/src/features/diagnostics.rs b/crates/pgls_workspace/src/features/diagnostics.rs index 2ca3132a0..e09bea76a 100644 --- a/crates/pgls_workspace/src/features/diagnostics.rs +++ b/crates/pgls_workspace/src/features/diagnostics.rs @@ -22,5 +22,8 @@ pub struct PullDiagnosticsResult { #[derive(Debug, serde::Serialize, serde::Deserialize)] #[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] pub struct PullDatabaseDiagnosticsParams { + pub categories: RuleCategories, pub max_diagnostics: u32, + pub only: Vec, + pub skip: Vec, } diff --git a/crates/pgls_workspace/src/settings.rs b/crates/pgls_workspace/src/settings.rs index 9125b7642..cc1fdc822 100644 --- a/crates/pgls_workspace/src/settings.rs +++ b/crates/pgls_workspace/src/settings.rs @@ -18,7 +18,9 @@ use pgls_configuration::{ diagnostics::InvalidIgnorePattern, files::FilesConfiguration, migrations::{MigrationsConfiguration, PartialMigrationsConfiguration}, + pglinter::PglinterConfiguration, plpgsql_check::PlPgSqlCheckConfiguration, + splinter::SplinterConfiguration, }; use pgls_fs::PgLSPath; use sqlx::postgres::PgConnectOptions; @@ -213,6 +215,12 @@ pub struct Settings { /// Linter settings applied to all files in the workspace pub linter: LinterSettings, + /// 
Splinter (database linter) settings for the workspace + pub splinter: SplinterSettings, + + /// Pglinter (database linter via pglinter extension) settings for the workspace + pub pglinter: PglinterSettings, + /// Type checking settings for the workspace pub typecheck: TypecheckSettings, @@ -254,6 +262,16 @@ impl Settings { to_linter_settings(working_directory.clone(), LinterConfiguration::from(linter))?; } + // splinter part + if let Some(splinter) = configuration.splinter { + self.splinter = to_splinter_settings(SplinterConfiguration::from(splinter)); + } + + // pglinter part + if let Some(pglinter) = configuration.pglinter { + self.pglinter = to_pglinter_settings(PglinterConfiguration::from(pglinter)); + } + // typecheck part if let Some(typecheck) = configuration.typecheck { self.typecheck = to_typecheck_settings(TypecheckConfiguration::from(typecheck)); @@ -286,6 +304,16 @@ impl Settings { self.linter.rules.as_ref().map(Cow::Borrowed) } + /// Returns splinter rules. + pub fn as_splinter_rules(&self) -> Option> { + self.splinter.rules.as_ref().map(Cow::Borrowed) + } + + /// Returns pglinter rules. + pub fn as_pglinter_rules(&self) -> Option> { + self.pglinter.rules.as_ref().map(Cow::Borrowed) + } + /// It retrieves the severity based on the `code` of the rule and the current configuration. /// /// The code of the has the following pattern: `{group}/{rule_name}`. 
@@ -314,6 +342,20 @@ fn to_linter_settings( }) } +fn to_splinter_settings(conf: SplinterConfiguration) -> SplinterSettings { + SplinterSettings { + enabled: conf.enabled, + rules: Some(conf.rules), + } +} + +fn to_pglinter_settings(conf: PglinterConfiguration) -> PglinterSettings { + PglinterSettings { + enabled: conf.enabled, + rules: Some(conf.rules), + } +} + fn to_typecheck_settings(conf: TypecheckConfiguration) -> TypecheckSettings { TypecheckSettings { search_path: conf.search_path.into_iter().collect(), @@ -434,6 +476,44 @@ impl Default for LinterSettings { } } +/// Splinter (database linter) settings for the entire workspace +#[derive(Debug)] +pub struct SplinterSettings { + /// Enabled by default + pub enabled: bool, + + /// List of rules + pub rules: Option, +} + +impl Default for SplinterSettings { + fn default() -> Self { + Self { + enabled: true, + rules: Some(pgls_configuration::splinter::Rules::default()), + } + } +} + +/// Pglinter (database linter via pglinter extension) settings for the entire workspace +#[derive(Debug)] +pub struct PglinterSettings { + /// Disabled by default (pglinter extension might not be installed) + pub enabled: bool, + + /// List of rules + pub rules: Option, +} + +impl Default for PglinterSettings { + fn default() -> Self { + Self { + enabled: false, // Disabled by default since pglinter extension might not be installed + rules: Some(pgls_configuration::pglinter::Rules::default()), + } + } +} + /// Type checking settings for the entire workspace #[derive(Debug)] pub struct PlPgSqlCheckSettings { diff --git a/crates/pgls_workspace/src/workspace/server.rs b/crates/pgls_workspace/src/workspace/server.rs index e5feed085..bc9295f17 100644 --- a/crates/pgls_workspace/src/workspace/server.rs +++ b/crates/pgls_workspace/src/workspace/server.rs @@ -706,9 +706,71 @@ impl Workspace for WorkspaceServer { fn pull_db_diagnostics( &self, - _params: crate::features::diagnostics::PullDatabaseDiagnosticsParams, + params: 
crate::features::diagnostics::PullDatabaseDiagnosticsParams, ) -> Result { - Ok(PullDiagnosticsResult::default()) + let settings = self.workspaces(); + let Some(settings) = settings.settings() else { + debug!("No settings available. Returning empty diagnostics."); + return Ok(PullDiagnosticsResult::default()); + }; + + if !settings.splinter.enabled { + debug!("Splinter is disabled. Skipping database linting."); + return Ok(PullDiagnosticsResult::default()); + } + + let Some(pool) = self.get_current_connection() else { + debug!("No database connection available. Skipping splinter checks."); + return Ok(PullDiagnosticsResult::default()); + }; + + let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings) + .with_splinter_rules(¶ms.only, ¶ms.skip) + .finish(); + + let schema_cache = self.schema_cache.load(pool.clone()).ok(); + + let pool_clone = pool.clone(); + let schema_cache_clone = schema_cache.clone(); + let categories = params.categories; + let splinter_result = run_async(async move { + let filter = AnalysisFilter { + categories, + enabled_rules: Some(enabled_rules.as_slice()), + disabled_rules: &disabled_rules, + }; + let splinter_params = pgls_splinter::SplinterParams { + conn: &pool_clone, + schema_cache: schema_cache_clone.as_deref(), + }; + pgls_splinter::run_splinter(splinter_params, &filter).await + }); + + let splinter_diagnostics = match splinter_result { + Ok(Ok(diags)) => diags, + Ok(Err(sql_err)) => { + debug!("Splinter SQL error: {:?}", sql_err); + return Err(sql_err.into()); + } + Err(join_err) => { + debug!("Splinter join error: {:?}", join_err); + return Err(join_err); + } + }; + + let total = splinter_diagnostics.len(); + let max = params.max_diagnostics as usize; + let diagnostics: Vec = splinter_diagnostics + .into_iter() + .take(max) + .map(SDiagnostic::new) + .collect(); + let skipped = total.saturating_sub(max) as u32; + + Ok(PullDiagnosticsResult { + diagnostics, + skipped_diagnostics: skipped, + }) } 
#[ignored_path(path=¶ms.path)] diff --git a/crates/pgls_workspace/src/workspace/server/analyser.rs b/crates/pgls_workspace/src/workspace/server/analyser.rs index d8de3bbb3..cace0563d 100644 --- a/crates/pgls_workspace/src/workspace/server/analyser.rs +++ b/crates/pgls_workspace/src/workspace/server/analyser.rs @@ -4,30 +4,42 @@ use rustc_hash::FxHashSet; use crate::settings::Settings; -pub(crate) struct AnalyserVisitorBuilder<'a, 'b> { - lint: Option>, - settings: &'b Settings, +pub(crate) struct AnalyserVisitorBuilder<'a> { + lint: Option>, + splinter: Option>, + settings: &'a Settings, } -impl<'a, 'b> AnalyserVisitorBuilder<'a, 'b> { - pub(crate) fn new(settings: &'b Settings) -> Self { +impl<'a> AnalyserVisitorBuilder<'a> { + pub(crate) fn new(settings: &'a Settings) -> Self { Self { settings, lint: None, + splinter: None, } } #[must_use] pub(crate) fn with_linter_rules( mut self, - only: &'b [RuleSelector], - skip: &'b [RuleSelector], + only: &'a [RuleSelector], + skip: &'a [RuleSelector], ) -> Self { self.lint = Some(LintVisitor::new(only, skip, self.settings)); self } #[must_use] - pub(crate) fn finish(self) -> (Vec>, Vec>) { + pub(crate) fn with_splinter_rules( + mut self, + only: &'a [RuleSelector], + skip: &'a [RuleSelector], + ) -> Self { + self.splinter = Some(SplinterVisitor::new(only, skip, self.settings)); + self + } + + #[must_use] + pub(crate) fn finish(self) -> (Vec>, Vec>) { let mut disabled_rules = vec![]; let mut enabled_rules = vec![]; if let Some(mut lint) = self.lint { @@ -36,6 +48,12 @@ impl<'a, 'b> AnalyserVisitorBuilder<'a, 'b> { enabled_rules.extend(linter_enabled_rules); disabled_rules.extend(linter_disabled_rules); } + if let Some(mut splinter) = self.splinter { + pgls_splinter::registry::visit_registry(&mut splinter); + let (splinter_enabled_rules, splinter_disabled_rules) = splinter.finish(); + enabled_rules.extend(splinter_enabled_rules); + disabled_rules.extend(splinter_disabled_rules); + } (enabled_rules, disabled_rules) } @@ 
-43,19 +61,19 @@ impl<'a, 'b> AnalyserVisitorBuilder<'a, 'b> { /// Type meant to register all the lint rules #[derive(Debug)] -struct LintVisitor<'a, 'b> { - pub(crate) enabled_rules: FxHashSet>, - pub(crate) disabled_rules: FxHashSet>, - only: &'b [RuleSelector], - skip: &'b [RuleSelector], - settings: &'b Settings, +struct LintVisitor<'a> { + pub(crate) enabled_rules: FxHashSet>, + pub(crate) disabled_rules: FxHashSet>, + only: &'a [RuleSelector], + skip: &'a [RuleSelector], + settings: &'a Settings, } -impl<'a, 'b> LintVisitor<'a, 'b> { +impl<'a> LintVisitor<'a> { pub(crate) fn new( - only: &'b [RuleSelector], - skip: &'b [RuleSelector], - settings: &'b Settings, + only: &'a [RuleSelector], + skip: &'a [RuleSelector], + settings: &'a Settings, ) -> Self { Self { enabled_rules: Default::default(), @@ -66,7 +84,12 @@ impl<'a, 'b> LintVisitor<'a, 'b> { } } - fn finish(mut self) -> (FxHashSet>, FxHashSet>) { + fn finish( + mut self, + ) -> ( + FxHashSet>, + FxHashSet>, + ) { let has_only_filter = !self.only.is_empty(); if !has_only_filter { @@ -109,7 +132,7 @@ impl<'a, 'b> LintVisitor<'a, 'b> { } } -impl RegistryVisitor for LintVisitor<'_, '_> { +impl RegistryVisitor for LintVisitor<'_> { fn record_category(&mut self) { if C::CATEGORY == RuleCategory::Lint { C::record_groups(self) @@ -138,18 +161,119 @@ impl RegistryVisitor for LintVisitor<'_, '_> { } } +/// Type meant to register all the splinter (database lint) rules +#[derive(Debug)] +struct SplinterVisitor<'a> { + pub(crate) enabled_rules: FxHashSet>, + pub(crate) disabled_rules: FxHashSet>, + only: &'a [RuleSelector], + skip: &'a [RuleSelector], + settings: &'a Settings, +} + +impl<'a> SplinterVisitor<'a> { + pub(crate) fn new( + only: &'a [RuleSelector], + skip: &'a [RuleSelector], + settings: &'a Settings, + ) -> Self { + Self { + enabled_rules: Default::default(), + disabled_rules: Default::default(), + only, + skip, + settings, + } + } + + fn finish( + mut self, + ) -> ( + FxHashSet>, + FxHashSet>, + ) { + 
let has_only_filter = !self.only.is_empty(); + + if !has_only_filter { + let enabled_rules = self + .settings + .as_splinter_rules() + .map(|rules| rules.as_enabled_rules()) + .unwrap_or_default(); + + self.enabled_rules.extend(enabled_rules); + + let disabled_rules = self + .settings + .as_splinter_rules() + .map(|rules| rules.as_disabled_rules()) + .unwrap_or_default(); + self.disabled_rules.extend(disabled_rules); + } + + (self.enabled_rules, self.disabled_rules) + } + + fn push_rule(&mut self) + where + R: RuleMeta + 'static, + { + for selector in self.only { + let filter = RuleFilter::from(selector); + if filter.match_rule::() { + self.enabled_rules.insert(filter); + } + } + for selector in self.skip { + let filter = RuleFilter::from(selector); + if filter.match_rule::() { + self.disabled_rules.insert(filter); + } + } + } +} + +impl RegistryVisitor for SplinterVisitor<'_> { + fn record_category(&mut self) { + // Splinter uses Lint as its kind in declare_category! macro + // We always record because we're visiting the splinter registry specifically + C::record_groups(self) + } + + fn record_group(&mut self) { + for selector in self.only { + if RuleFilter::from(selector).match_group::() { + G::record_rules(self) + } + } + + for selector in self.skip { + if RuleFilter::from(selector).match_group::() { + G::record_rules(self) + } + } + } + + fn record_rule(&mut self) + where + R: RuleMeta + 'static, + { + self.push_rule::() + } +} + #[cfg(test)] mod tests { use pgls_analyse::RuleFilter; use pgls_configuration::{RuleConfiguration, Rules, linter::Safety}; use crate::{ - settings::{LinterSettings, Settings}, + settings::{LinterSettings, Settings, SplinterSettings}, workspace::server::analyser::AnalyserVisitorBuilder, }; #[test] - fn recognizes_disabled_rules() { + fn recognizes_disabled_linter_rules() { let settings = Settings { linter: LinterSettings { rules: Some(Rules { @@ -175,4 +299,77 @@ mod tests { vec![RuleFilter::Rule("safety", "banDropColumn")] ) } + + 
#[test] + fn recognizes_disabled_splinter_rules() { + use pgls_configuration::splinter::{Performance, Rules as SplinterRules}; + + let settings = Settings { + splinter: SplinterSettings { + enabled: true, + rules: Some(SplinterRules { + performance: Some(Performance { + auth_rls_initplan: Some(RuleConfiguration::Plain( + pgls_configuration::RulePlainConfiguration::Off, + )), + ..Default::default() + }), + ..Default::default() + }), + }, + ..Default::default() + }; + + let (_, disabled_rules) = AnalyserVisitorBuilder::new(&settings) + .with_splinter_rules(&[], &[]) + .finish(); + + assert_eq!( + disabled_rules, + vec![RuleFilter::Rule("performance", "authRlsInitplan")] + ) + } + + #[test] + fn combines_linter_and_splinter_rules() { + use pgls_configuration::splinter::{Performance, Rules as SplinterRules}; + + let settings = Settings { + linter: LinterSettings { + rules: Some(Rules { + safety: Some(Safety { + ban_drop_column: Some(RuleConfiguration::Plain( + pgls_configuration::RulePlainConfiguration::Off, + )), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + splinter: SplinterSettings { + enabled: true, + rules: Some(SplinterRules { + performance: Some(Performance { + auth_rls_initplan: Some(RuleConfiguration::Plain( + pgls_configuration::RulePlainConfiguration::Off, + )), + ..Default::default() + }), + ..Default::default() + }), + }, + ..Default::default() + }; + + let (_, disabled_rules) = AnalyserVisitorBuilder::new(&settings) + .with_linter_rules(&[], &[]) + .with_splinter_rules(&[], &[]) + .finish(); + + // Should contain disabled rules from both linter and splinter + assert!(disabled_rules.contains(&RuleFilter::Rule("safety", "banDropColumn"))); + assert!(disabled_rules.contains(&RuleFilter::Rule("performance", "authRlsInitplan"))); + assert_eq!(disabled_rules.len(), 2); + } } diff --git a/docs/schema.json b/docs/schema.json index 93dc6ceae..e32ffb850 100644 --- a/docs/schema.json +++ b/docs/schema.json @@ -66,6 +66,17 
@@ } ] }, + "pglinter": { + "description": "The configuration for pglinter", + "anyOf": [ + { + "$ref": "#/definitions/PglinterConfiguration" + }, + { + "type": "null" + } + ] + }, "plpgsqlCheck": { "description": "The configuration for type checking", "anyOf": [ @@ -77,6 +88,17 @@ } ] }, + "splinter": { + "description": "The configuration for splinter", + "anyOf": [ + { + "$ref": "#/definitions/SplinterConfiguration" + }, + { + "type": "null" + } + ] + }, "typecheck": { "description": "The configuration for type checking", "anyOf": [ @@ -102,6 +124,213 @@ }, "additionalProperties": false, "definitions": { + "Base": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] + }, + "compositePrimaryKeyTooManyColumns": { + "description": "CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyObjectsWithUppercase": { + "description": "HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyRedudantIndex": { + "description": "HowManyRedudantIndex (B002): Count number of redundant index vs nb index.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTableWithoutIndexOnFk": { + "description": "HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTableWithoutPrimaryKey": { + "description": "HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key.", + "anyOf": [ + { + "$ref": 
"#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesNeverSelected": { + "description": "HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithFkMismatch": { + "description": "HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithFkOutsideSchema": { + "description": "HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithReservedKeywords": { + "description": "HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithSameTrigger": { + "description": "HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyUnusedIndex": { + "description": "HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "severalTableOwnerInSchema": { + "description": "SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners.", + "anyOf": 
[ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "Cluster": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] + }, + "passwordEncryptionIsMd5": { + "description": "PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists": { + "description": "PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "pgHbaEntriesWithMethodTrustShouldNotExists": { + "description": "PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + } + }, + "additionalProperties": false + }, "DatabaseConfiguration": { "description": "The configuration of the database connection.", "type": "object", @@ -246,7 +475,37 @@ "description": "List of rules", "anyOf": [ { - "$ref": "#/definitions/Rules" + "$ref": "#/definitions/LinterRules" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "LinterRules": { + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules. The rules that belong to `nursery` won't be enabled.", + "type": [ + "boolean", + "null" + ] + }, + "recommended": { + "description": "It enables the lint rules recommended by Postgres Language Server. `true` by default.", + "type": [ + "boolean", + "null" + ] + }, + "safety": { + "anyOf": [ + { + "$ref": "#/definitions/Safety" }, { "type": "null" @@ -279,92 +538,450 @@ }, "additionalProperties": false }, - "PlPgSqlCheckConfiguration": { - "description": "The configuration for type checking.", + "Performance": { + "description": "A list of rules that belong to this group", "type": "object", "properties": { - "enabled": { - "description": "if `false`, it disables the feature and pglpgsql_check won't be executed. 
`true` by default", + "all": { + "description": "It enables ALL rules for this group.", "type": [ "boolean", "null" ] - } - }, - "additionalProperties": false - }, - "RuleConfiguration": { - "anyOf": [ - { - "$ref": "#/definitions/RulePlainConfiguration" }, - { - "$ref": "#/definitions/RuleWithNoOptions" - } - ] - }, - "RulePlainConfiguration": { - "type": "string", - "enum": [ - "warn", - "error", - "info", - "off" - ] - }, - "RuleWithNoOptions": { - "type": "object", - "required": [ - "level" - ], - "properties": { - "level": { + "authRlsInitplan": { + "description": "Auth RLS Initialization Plan: Detects if calls to `current_setting()` and `auth.()` in RLS policies are being unnecessarily re-evaluated for each row", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "duplicateIndex": { + "description": "Duplicate Index: Detects cases where two ore more identical indexes exist.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "multiplePermissivePolicies": { + "description": "Multiple Permissive Policies: Detects if multiple permissive row level security policies are present on a table for the same `role` and `action` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "noPrimaryKey": { + "description": "No Primary Key: Detects if a table does not have a primary key. 
Tables without a primary key can be inefficient to interact with at scale.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "tableBloat": { + "description": "Table Bloat: Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "unindexedForeignKeys": { + "description": "Unindexed foreign keys: Identifies foreign key constraints without a covering index, which can impact database performance.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "unusedIndex": { + "description": "Unused Index: Detects if an index has never been used and may be a candidate for removal.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "PglinterConfiguration": { + "type": "object", + "properties": { + "enabled": { + "description": "if `false`, it disables the feature and the linter won't be executed. `true` by default", + "type": [ + "boolean", + "null" + ] + }, + "rules": { + "description": "List of rules", + "anyOf": [ + { + "$ref": "#/definitions/PglinterRules" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "PglinterRules": { + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules. 
The rules that belong to `nursery` won't be enabled.", + "type": [ + "boolean", + "null" + ] + }, + "base": { + "anyOf": [ + { + "$ref": "#/definitions/Base" + }, + { + "type": "null" + } + ] + }, + "cluster": { + "anyOf": [ + { + "$ref": "#/definitions/Cluster" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the lint rules recommended by Postgres Language Server. `true` by default.", + "type": [ + "boolean", + "null" + ] + }, + "schema": { + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "PlPgSqlCheckConfiguration": { + "description": "The configuration for type checking.", + "type": "object", + "properties": { + "enabled": { + "description": "if `false`, it disables the feature and pglpgsql_check won't be executed. `true` by default", + "type": [ + "boolean", + "null" + ] + } + }, + "additionalProperties": false + }, + "RuleConfiguration": { + "anyOf": [ + { + "$ref": "#/definitions/RulePlainConfiguration" + }, + { + "$ref": "#/definitions/RuleWithNoOptions" + } + ] + }, + "RulePlainConfiguration": { + "type": "string", + "enum": [ + "warn", + "error", + "info", + "off" + ] + }, + "RuleWithNoOptions": { + "type": "object", + "required": [ + "level" + ], + "properties": { + "level": { "description": "The severity of the emitted diagnostics by the rule", "allOf": [ { - "$ref": "#/definitions/RulePlainConfiguration" + "$ref": "#/definitions/RulePlainConfiguration" + } + ] + } + }, + "additionalProperties": false + }, + "Safety": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "addSerialColumn": { + "description": "Adding a column with a SERIAL type or GENERATED ALWAYS AS ... 
STORED causes a full table rewrite.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingFieldWithDefault": { + "description": "Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingForeignKeyConstraint": { + "description": "Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingNotNullField": { + "description": "Setting a column NOT NULL blocks reads while the table is scanned.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingPrimaryKeyConstraint": { + "description": "Adding a primary key constraint results in locks and table rewrites.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "addingRequiredField": { + "description": "Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] + }, + "banCharField": { + "description": "Using CHAR(n) or CHARACTER(n) types is discouraged.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banConcurrentIndexCreationInTransaction": { + "description": "Concurrent index creation is not allowed within a transaction.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banDropColumn": { + "description": "Dropping a column may break existing clients.", + "anyOf": [ 
+ { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banDropDatabase": { + "description": "Dropping a database may break existing clients (and everything else, really).", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banDropNotNull": { + "description": "Dropping a NOT NULL constraint may break existing clients.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banDropTable": { + "description": "Dropping a table may break existing clients.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "banTruncateCascade": { + "description": "Using TRUNCATE's CASCADE option will truncate any tables that are also foreign-keyed to the specified tables.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "changingColumnType": { + "description": "Changing a column type may break existing clients.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "constraintMissingNotValid": { + "description": "Adding constraints without NOT VALID blocks all reads and writes.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "creatingEnum": { + "description": "Creating enum types is not recommended for new applications.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" } ] - } - }, - "additionalProperties": false - }, - "Rules": { - "type": "object", - "properties": { - "all": { - "description": "It enables ALL rules. 
The rules that belong to `nursery` won't be enabled.", - "type": [ - "boolean", - "null" + }, + "disallowUniqueConstraint": { + "description": "Disallow adding a UNIQUE constraint without using an existing index.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } ] }, - "recommended": { - "description": "It enables the lint rules recommended by Postgres Language Server. `true` by default.", - "type": [ - "boolean", - "null" + "lockTimeoutWarning": { + "description": "Taking a dangerous lock without setting a lock timeout can cause indefinite blocking.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } ] }, - "safety": { + "multipleAlterTable": { + "description": "Multiple ALTER TABLE statements on the same table should be combined into a single statement.", "anyOf": [ { - "$ref": "#/definitions/Safety" + "$ref": "#/definitions/RuleConfiguration" }, { "type": "null" } ] - } - }, - "additionalProperties": false - }, - "Safety": { - "description": "A list of rules that belong to this group", - "type": "object", - "properties": { - "addSerialColumn": { - "description": "Adding a column with a SERIAL type or GENERATED ALWAYS AS ... 
STORED causes a full table rewrite.", + }, + "preferBigInt": { + "description": "Prefer BIGINT over smaller integer types.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -374,8 +991,8 @@ } ] }, - "addingFieldWithDefault": { - "description": "Adding a column with a DEFAULT value may lead to a table rewrite while holding an ACCESS EXCLUSIVE lock.", + "preferBigintOverInt": { + "description": "Prefer BIGINT over INT/INTEGER types.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -385,8 +1002,8 @@ } ] }, - "addingForeignKeyConstraint": { - "description": "Adding a foreign key constraint requires a table scan and a SHARE ROW EXCLUSIVE lock on both tables, which blocks writes.", + "preferBigintOverSmallint": { + "description": "Prefer BIGINT over SMALLINT types.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -396,8 +1013,8 @@ } ] }, - "addingNotNullField": { - "description": "Setting a column NOT NULL blocks reads while the table is scanned.", + "preferIdentity": { + "description": "Prefer using IDENTITY columns over serial columns.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -407,8 +1024,8 @@ } ] }, - "addingPrimaryKeyConstraint": { - "description": "Adding a primary key constraint results in locks and table rewrites.", + "preferJsonb": { + "description": "Prefer JSONB over JSON types.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -418,8 +1035,8 @@ } ] }, - "addingRequiredField": { - "description": "Adding a new column that is NOT NULL and has no default value to an existing table effectively makes it required.", + "preferRobustStmts": { + "description": "Prefer statements with guards for robustness in migrations.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -429,15 +1046,19 @@ } ] }, - "all": { - "description": "It enables ALL rules for this group.", - "type": [ - "boolean", - "null" + "preferTextField": { + "description": "Prefer using TEXT over VARCHAR(n) types.", + "anyOf": [ + { + 
"$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } ] }, - "banCharField": { - "description": "Using CHAR(n) or CHARACTER(n) types is discouraged.", + "preferTimestamptz": { + "description": "Prefer TIMESTAMPTZ over TIMESTAMP types.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -447,8 +1068,15 @@ } ] }, - "banConcurrentIndexCreationInTransaction": { - "description": "Concurrent index creation is not allowed within a transaction.", + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "renamingColumn": { + "description": "Renaming columns may break existing queries and application code.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -458,8 +1086,8 @@ } ] }, - "banDropColumn": { - "description": "Dropping a column may break existing clients.", + "renamingTable": { + "description": "Renaming tables may break existing queries and application code.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -469,8 +1097,8 @@ } ] }, - "banDropDatabase": { - "description": "Dropping a database may break existing clients (and everything else, really).", + "requireConcurrentIndexCreation": { + "description": "Creating indexes non-concurrently can lock the table for writes.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -480,8 +1108,8 @@ } ] }, - "banDropNotNull": { - "description": "Dropping a NOT NULL constraint may break existing clients.", + "requireConcurrentIndexDeletion": { + "description": "Dropping indexes non-concurrently can lock the table for reads.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -491,8 +1119,8 @@ } ] }, - "banDropTable": { - "description": "Dropping a table may break existing clients.", + "runningStatementWhileHoldingAccessExclusive": { + "description": "Running additional statements while holding an ACCESS EXCLUSIVE lock blocks all table access.", "anyOf": [ { "$ref": 
"#/definitions/RuleConfiguration" @@ -502,8 +1130,8 @@ } ] }, - "banTruncateCascade": { - "description": "Using TRUNCATE's CASCADE option will truncate any tables that are also foreign-keyed to the specified tables.", + "transactionNesting": { + "description": "Detects problematic transaction nesting that could lead to unexpected behavior.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -512,9 +1140,23 @@ "type": "null" } ] + } + }, + "additionalProperties": false + }, + "Schema": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] }, - "changingColumnType": { - "description": "Changing a column type may break existing clients.", + "ownerSchemaIsInternalRole": { + "description": "OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -524,8 +1166,15 @@ } ] }, - "constraintMissingNotValid": { - "description": "Adding constraints without NOT VALID blocks all reads and writes.", + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "schemaOwnerDoNotMatchTableOwner": { + "description": "SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -535,8 +1184,8 @@ } ] }, - "creatingEnum": { - "description": "Creating enum types is not recommended for new applications.", + "schemaPrefixedOrSuffixedWithEnvt": { + "description": "SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. 
Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -546,8 +1195,8 @@ } ] }, - "disallowUniqueConstraint": { - "description": "Disallow adding a UNIQUE constraint without using an existing index.", + "schemaWithDefaultRoleNotGranted": { + "description": "SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -557,8 +1206,8 @@ } ] }, - "lockTimeoutWarning": { - "description": "Taking a dangerous lock without setting a lock timeout can cause indefinite blocking.", + "unsecuredPublicSchema": { + "description": "UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -567,9 +1216,23 @@ "type": "null" } ] + } + }, + "additionalProperties": false + }, + "Security": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] }, - "multipleAlterTable": { - "description": "Multiple ALTER TABLE statements on the same table should be combined into a single statement.", + "authUsersExposed": { + "description": "Exposed Auth Users: Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -579,8 +1242,8 @@ } ] }, - "preferBigInt": { - "description": "Prefer BIGINT over smaller integer types.", + "extensionInPublic": { + "description": "Extension in Public: Detects extensions installed in the 
`public` schema.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -590,8 +1253,8 @@ } ] }, - "preferBigintOverInt": { - "description": "Prefer BIGINT over INT/INTEGER types.", + "extensionVersionsOutdated": { + "description": "Extension Versions Outdated: Detects extensions that are not using the default (recommended) version.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -601,8 +1264,8 @@ } ] }, - "preferBigintOverSmallint": { - "description": "Prefer BIGINT over SMALLINT types.", + "fkeyToAuthUnique": { + "description": "Foreign Key to Auth Unique Constraint: Detects user defined foreign keys to unique constraints in the auth schema.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -612,8 +1275,8 @@ } ] }, - "preferIdentity": { - "description": "Prefer using IDENTITY columns over serial columns.", + "foreignTableInApi": { + "description": "Foreign Table in API: Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -623,8 +1286,8 @@ } ] }, - "preferJsonb": { - "description": "Prefer JSONB over JSON types.", + "functionSearchPathMutable": { + "description": "Function Search Path Mutable: Detects functions where the search_path parameter is not set.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -634,8 +1297,8 @@ } ] }, - "preferRobustStmts": { - "description": "Prefer statements with guards for robustness in migrations.", + "insecureQueueExposedInApi": { + "description": "Insecure Queue Exposed in API: Detects cases where an insecure Queue is exposed over Data APIs", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -645,8 +1308,8 @@ } ] }, - "preferTextField": { - "description": "Prefer using TEXT over VARCHAR(n) types.", + "materializedViewInApi": { + "description": "Materialized View in API: Detects materialized views that are accessible over the Data APIs.", "anyOf": [ { "$ref": 
"#/definitions/RuleConfiguration" @@ -656,8 +1319,8 @@ } ] }, - "preferTimestamptz": { - "description": "Prefer TIMESTAMPTZ over TIMESTAMP types.", + "policyExistsRlsDisabled": { + "description": "Policy Exists RLS Disabled: Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -674,8 +1337,8 @@ "null" ] }, - "renamingColumn": { - "description": "Renaming columns may break existing queries and application code.", + "rlsDisabledInPublic": { + "description": "RLS Disabled in Public: Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -685,8 +1348,8 @@ } ] }, - "renamingTable": { - "description": "Renaming tables may break existing queries and application code.", + "rlsEnabledNoPolicy": { + "description": "RLS Enabled No Policy: Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -696,8 +1359,8 @@ } ] }, - "requireConcurrentIndexCreation": { - "description": "Creating indexes non-concurrently can lock the table for writes.", + "rlsReferencesUserMetadata": { + "description": "RLS references user metadata: Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -707,8 +1370,8 @@ } ] }, - "requireConcurrentIndexDeletion": { - "description": "Dropping indexes non-concurrently can lock the table for reads.", + "securityDefinerView": { + "description": "Security Definer View: Detects views defined with the SECURITY DEFINER property. 
These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -718,8 +1381,8 @@ } ] }, - "runningStatementWhileHoldingAccessExclusive": { - "description": "Running additional statements while holding an ACCESS EXCLUSIVE lock blocks all table access.", + "unsupportedRegTypes": { + "description": "Unsupported reg types: Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade.", "anyOf": [ { "$ref": "#/definitions/RuleConfiguration" @@ -728,12 +1391,65 @@ "type": "null" } ] + } + }, + "additionalProperties": false + }, + "SplinterConfiguration": { + "type": "object", + "properties": { + "enabled": { + "description": "if `false`, it disables the feature and the linter won't be executed. `true` by default", + "type": [ + "boolean", + "null" + ] }, - "transactionNesting": { - "description": "Detects problematic transaction nesting that could lead to unexpected behavior.", + "rules": { + "description": "List of rules", "anyOf": [ { - "$ref": "#/definitions/RuleConfiguration" + "$ref": "#/definitions/SplinterRules" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "SplinterRules": { + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules. The rules that belong to `nursery` won't be enabled.", + "type": [ + "boolean", + "null" + ] + }, + "performance": { + "anyOf": [ + { + "$ref": "#/definitions/Performance" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the lint rules recommended by Postgres Language Server. 
`true` by default.", + "type": [ + "boolean", + "null" + ] + }, + "security": { + "anyOf": [ + { + "$ref": "#/definitions/Security" }, { "type": "null" diff --git a/justfile b/justfile index 5dd54a224..524fe326c 100644 --- a/justfile +++ b/justfile @@ -29,6 +29,7 @@ gen-lint: cargo run -p xtask_codegen -- configuration cargo run -p xtask_codegen -- bindings cargo run -p xtask_codegen -- splinter + cargo run -p xtask_codegen -- pglinter cargo run -p rules_check cargo run -p docs_codegen just format diff --git a/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts b/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts index cf42e5145..bf79cb234 100644 --- a/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts @@ -95,6 +95,28 @@ export type Category = | "lint/safety/requireConcurrentIndexDeletion" | "lint/safety/runningStatementWhileHoldingAccessExclusive" | "lint/safety/transactionNesting" + | "pglinter/extensionNotInstalled" + | "pglinter/ruleDisabledInExtension" + | "pglinter/base/compositePrimaryKeyTooManyColumns" + | "pglinter/base/howManyObjectsWithUppercase" + | "pglinter/base/howManyRedudantIndex" + | "pglinter/base/howManyTableWithoutIndexOnFk" + | "pglinter/base/howManyTableWithoutPrimaryKey" + | "pglinter/base/howManyTablesNeverSelected" + | "pglinter/base/howManyTablesWithFkMismatch" + | "pglinter/base/howManyTablesWithFkOutsideSchema" + | "pglinter/base/howManyTablesWithReservedKeywords" + | "pglinter/base/howManyTablesWithSameTrigger" + | "pglinter/base/howManyUnusedIndex" + | "pglinter/base/severalTableOwnerInSchema" + | "pglinter/cluster/passwordEncryptionIsMd5" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + | "pglinter/schema/ownerSchemaIsInternalRole" + | "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + | 
"pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + | "pglinter/schema/schemaWithDefaultRoleNotGranted" + | "pglinter/schema/unsecuredPublicSchema" | "splinter/performance/authRlsInitplan" | "splinter/performance/duplicateIndex" | "splinter/performance/multiplePermissivePolicies" @@ -135,7 +157,11 @@ export type Category = | "lint/safety" | "splinter" | "splinter/performance" - | "splinter/security"; + | "splinter/security" + | "pglinter" + | "pglinter/base" + | "pglinter/cluster" + | "pglinter/schema"; export interface Location { path?: Resource_for_String; sourceCode?: string; @@ -310,10 +336,18 @@ export interface PartialConfiguration { * Configure migrations */ migrations?: PartialMigrationsConfiguration; + /** + * The configuration for pglinter + */ + pglinter?: PartialPglinterConfiguration; /** * The configuration for type checking */ plpgsqlCheck?: PartialPlPgSqlCheckConfiguration; + /** + * The configuration for splinter + */ + splinter?: PartialSplinterConfiguration; /** * The configuration for type checking */ @@ -391,7 +425,7 @@ export interface PartialLinterConfiguration { /** * List of rules */ - rules?: Rules; + rules?: LinterRules; } /** * The configuration of the filesystem @@ -406,6 +440,16 @@ export interface PartialMigrationsConfiguration { */ migrationsDir?: string; } +export interface PartialPglinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: PglinterRules; +} /** * The configuration for type checking. */ @@ -415,6 +459,16 @@ export interface PartialPlPgSqlCheckConfiguration { */ enabled?: boolean; } +export interface PartialSplinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: SplinterRules; +} /** * The configuration for type checking. 
*/ @@ -455,7 +509,7 @@ If we can't find the configuration, it will attempt to use the current working d */ useIgnoreFile?: boolean; } -export interface Rules { +export interface LinterRules { /** * It enables ALL rules. The rules that belong to `nursery` won't be enabled. */ @@ -466,6 +520,31 @@ export interface Rules { recommended?: boolean; safety?: Safety; } +export interface PglinterRules { + /** + * It enables ALL rules. The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + base?: Base; + cluster?: Cluster; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + schema?: Schema; +} +export interface SplinterRules { + /** + * It enables ALL rules. The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + performance?: Performance; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + security?: Security; +} export type VcsClientKind = "git"; /** * A list of rules that belong to this group @@ -612,6 +691,235 @@ export interface Safety { */ transactionNesting?: RuleConfiguration_for_Null; } +/** + * A list of rules that belong to this group + */ +export interface Base { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns + */ + compositePrimaryKeyTooManyColumns?: RuleConfiguration_for_Null; + /** + * HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns. + */ + howManyObjectsWithUppercase?: RuleConfiguration_for_Null; + /** + * HowManyRedudantIndex (B002): Count number of redundant index vs nb index. + */ + howManyRedudantIndex?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key. 
+ */ + howManyTableWithoutIndexOnFk?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key. + */ + howManyTableWithoutPrimaryKey?: RuleConfiguration_for_Null; + /** + * HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected. + */ + howManyTablesNeverSelected?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type. + */ + howManyTablesWithFkMismatch?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema. + */ + howManyTablesWithFkOutsideSchema?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names. + */ + howManyTablesWithReservedKeywords?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers. + */ + howManyTablesWithSameTrigger?: RuleConfiguration_for_Null; + /** + * HowManyUnusedIndex (B004): Count number of unused index vs nb index (based on pg_stat_user_indexes, indexes associated to unique constraints are discarded.) + */ + howManyUnusedIndex?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners. + */ + severalTableOwnerInSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Cluster { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. 
Warning, you will need to reset all passwords after this is changed to scram-sha-256. + */ + passwordEncryptionIsMd5?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustShouldNotExists?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; +} +/** + * A list of rules that belong to this group + */ +export interface Schema { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necessary). + */ + ownerSchemaIsInternalRole?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match. + */ + schemaOwnerDoNotMatchTableOwner?: RuleConfiguration_for_Null; + /** + * SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. 
Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy. + */ + schemaPrefixedOrSuffixedWithEnvt?: RuleConfiguration_for_Null; + /** + * SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that future table will not be granted through a role. So you will have to re-execute grants on it. + */ + schemaWithDefaultRoleNotGranted?: RuleConfiguration_for_Null; + /** + * UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects. + */ + unsecuredPublicSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Performance { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * Auth RLS Initialization Plan: Detects if calls to `current_setting()` and `auth.()` in RLS policies are being unnecessarily re-evaluated for each row + */ + authRlsInitplan?: RuleConfiguration_for_Null; + /** + * Duplicate Index: Detects cases where two or more identical indexes exist. + */ + duplicateIndex?: RuleConfiguration_for_Null; + /** + * Multiple Permissive Policies: Detects if multiple permissive row level security policies are present on a table for the same `role` and `action` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query. + */ + multiplePermissivePolicies?: RuleConfiguration_for_Null; + /** + * No Primary Key: Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale. + */ + noPrimaryKey?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * Table Bloat: Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster. 
+ */ + tableBloat?: RuleConfiguration_for_Null; + /** + * Unindexed foreign keys: Identifies foreign key constraints without a covering index, which can impact database performance. + */ + unindexedForeignKeys?: RuleConfiguration_for_Null; + /** + * Unused Index: Detects if an index has never been used and may be a candidate for removal. + */ + unusedIndex?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Security { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * Exposed Auth Users: Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security. + */ + authUsersExposed?: RuleConfiguration_for_Null; + /** + * Extension in Public: Detects extensions installed in the `public` schema. + */ + extensionInPublic?: RuleConfiguration_for_Null; + /** + * Extension Versions Outdated: Detects extensions that are not using the default (recommended) version. + */ + extensionVersionsOutdated?: RuleConfiguration_for_Null; + /** + * Foreign Key to Auth Unique Constraint: Detects user defined foreign keys to unique constraints in the auth schema. + */ + fkeyToAuthUnique?: RuleConfiguration_for_Null; + /** + * Foreign Table in API: Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies. + */ + foreignTableInApi?: RuleConfiguration_for_Null; + /** + * Function Search Path Mutable: Detects functions where the search_path parameter is not set. + */ + functionSearchPathMutable?: RuleConfiguration_for_Null; + /** + * Insecure Queue Exposed in API: Detects cases where an insecure Queue is exposed over Data APIs + */ + insecureQueueExposedInApi?: RuleConfiguration_for_Null; + /** + * Materialized View in API: Detects materialized views that are accessible over the Data APIs. 
+ */ + materializedViewInApi?: RuleConfiguration_for_Null; + /** + * Policy Exists RLS Disabled: Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table. + */ + policyExistsRlsDisabled?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * RLS Disabled in Public: Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST + */ + rlsDisabledInPublic?: RuleConfiguration_for_Null; + /** + * RLS Enabled No Policy: Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created. + */ + rlsEnabledNoPolicy?: RuleConfiguration_for_Null; + /** + * RLS references user metadata: Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy. + */ + rlsReferencesUserMetadata?: RuleConfiguration_for_Null; + /** + * Security Definer View: Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user + */ + securityDefinerView?: RuleConfiguration_for_Null; + /** + * Unsupported reg types: Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade. 
+ */ + unsupportedRegTypes?: RuleConfiguration_for_Null; +} export type RuleConfiguration_for_Null = | RulePlainConfiguration | RuleWithOptions_for_Null; diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index cf42e5145..bf79cb234 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -95,6 +95,28 @@ export type Category = | "lint/safety/requireConcurrentIndexDeletion" | "lint/safety/runningStatementWhileHoldingAccessExclusive" | "lint/safety/transactionNesting" + | "pglinter/extensionNotInstalled" + | "pglinter/ruleDisabledInExtension" + | "pglinter/base/compositePrimaryKeyTooManyColumns" + | "pglinter/base/howManyObjectsWithUppercase" + | "pglinter/base/howManyRedudantIndex" + | "pglinter/base/howManyTableWithoutIndexOnFk" + | "pglinter/base/howManyTableWithoutPrimaryKey" + | "pglinter/base/howManyTablesNeverSelected" + | "pglinter/base/howManyTablesWithFkMismatch" + | "pglinter/base/howManyTablesWithFkOutsideSchema" + | "pglinter/base/howManyTablesWithReservedKeywords" + | "pglinter/base/howManyTablesWithSameTrigger" + | "pglinter/base/howManyUnusedIndex" + | "pglinter/base/severalTableOwnerInSchema" + | "pglinter/cluster/passwordEncryptionIsMd5" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + | "pglinter/schema/ownerSchemaIsInternalRole" + | "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + | "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + | "pglinter/schema/schemaWithDefaultRoleNotGranted" + | "pglinter/schema/unsecuredPublicSchema" | "splinter/performance/authRlsInitplan" | "splinter/performance/duplicateIndex" | "splinter/performance/multiplePermissivePolicies" @@ -135,7 +157,11 @@ export type Category = | "lint/safety" | "splinter" | "splinter/performance" - | "splinter/security"; + | 
"splinter/security" + | "pglinter" + | "pglinter/base" + | "pglinter/cluster" + | "pglinter/schema"; export interface Location { path?: Resource_for_String; sourceCode?: string; @@ -310,10 +336,18 @@ export interface PartialConfiguration { * Configure migrations */ migrations?: PartialMigrationsConfiguration; + /** + * The configuration for pglinter + */ + pglinter?: PartialPglinterConfiguration; /** * The configuration for type checking */ plpgsqlCheck?: PartialPlPgSqlCheckConfiguration; + /** + * The configuration for splinter + */ + splinter?: PartialSplinterConfiguration; /** * The configuration for type checking */ @@ -391,7 +425,7 @@ export interface PartialLinterConfiguration { /** * List of rules */ - rules?: Rules; + rules?: LinterRules; } /** * The configuration of the filesystem @@ -406,6 +440,16 @@ export interface PartialMigrationsConfiguration { */ migrationsDir?: string; } +export interface PartialPglinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: PglinterRules; +} /** * The configuration for type checking. */ @@ -415,6 +459,16 @@ export interface PartialPlPgSqlCheckConfiguration { */ enabled?: boolean; } +export interface PartialSplinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: SplinterRules; +} /** * The configuration for type checking. */ @@ -455,7 +509,7 @@ If we can't find the configuration, it will attempt to use the current working d */ useIgnoreFile?: boolean; } -export interface Rules { +export interface LinterRules { /** * It enables ALL rules. The rules that belong to `nursery` won't be enabled. */ @@ -466,6 +520,31 @@ export interface Rules { recommended?: boolean; safety?: Safety; } +export interface PglinterRules { + /** + * It enables ALL rules. 
The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + base?: Base; + cluster?: Cluster; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + schema?: Schema; +} +export interface SplinterRules { + /** + * It enables ALL rules. The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + performance?: Performance; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + security?: Security; +} export type VcsClientKind = "git"; /** * A list of rules that belong to this group @@ -612,6 +691,235 @@ export interface Safety { */ transactionNesting?: RuleConfiguration_for_Null; } +/** + * A list of rules that belong to this group + */ +export interface Base { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns + */ + compositePrimaryKeyTooManyColumns?: RuleConfiguration_for_Null; + /** + * HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns. + */ + howManyObjectsWithUppercase?: RuleConfiguration_for_Null; + /** + * HowManyRedudantIndex (B002): Count number of redundant index vs nb index. + */ + howManyRedudantIndex?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key. + */ + howManyTableWithoutIndexOnFk?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key. + */ + howManyTableWithoutPrimaryKey?: RuleConfiguration_for_Null; + /** + * HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected. 
+ */ + howManyTablesNeverSelected?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type. + */ + howManyTablesWithFkMismatch?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema. + */ + howManyTablesWithFkOutsideSchema?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names. + */ + howManyTablesWithReservedKeywords?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers. + */ + howManyTablesWithSameTrigger?: RuleConfiguration_for_Null; + /** + * HowManyUnusedIndex (B004): Count number of unused index vs nb index (based on pg_stat_user_indexes, indexes associated to unique constraints are discarded.) + */ + howManyUnusedIndex?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners. + */ + severalTableOwnerInSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Cluster { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256. + */ + passwordEncryptionIsMd5?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustShouldNotExists?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; +} +/** + * A list of rules that belong to this group + */ +export interface Schema { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necessary). + */ + ownerSchemaIsInternalRole?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match. + */ + schemaOwnerDoNotMatchTableOwner?: RuleConfiguration_for_Null; + /** + * SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy. + */ + schemaPrefixedOrSuffixedWithEnvt?: RuleConfiguration_for_Null; + /** + * SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that future table will not be granted through a role. So you will have to re-execute grants on it. 
+ */ + schemaWithDefaultRoleNotGranted?: RuleConfiguration_for_Null; + /** + * UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects. + */ + unsecuredPublicSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Performance { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * Auth RLS Initialization Plan: Detects if calls to `current_setting()` and `auth.()` in RLS policies are being unnecessarily re-evaluated for each row + */ + authRlsInitplan?: RuleConfiguration_for_Null; + /** + * Duplicate Index: Detects cases where two or more identical indexes exist. + */ + duplicateIndex?: RuleConfiguration_for_Null; + /** + * Multiple Permissive Policies: Detects if multiple permissive row level security policies are present on a table for the same `role` and `action` (e.g. insert). Multiple permissive policies are suboptimal for performance as each policy must be executed for every relevant query. + */ + multiplePermissivePolicies?: RuleConfiguration_for_Null; + /** + * No Primary Key: Detects if a table does not have a primary key. Tables without a primary key can be inefficient to interact with at scale. + */ + noPrimaryKey?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * Table Bloat: Detects if a table has excess bloat and may benefit from maintenance operations like vacuum full or cluster. + */ + tableBloat?: RuleConfiguration_for_Null; + /** + * Unindexed foreign keys: Identifies foreign key constraints without a covering index, which can impact database performance. + */ + unindexedForeignKeys?: RuleConfiguration_for_Null; + /** + * Unused Index: Detects if an index has never been used and may be a candidate for removal. 
+ */ + unusedIndex?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Security { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * Exposed Auth Users: Detects if auth.users is exposed to anon or authenticated roles via a view or materialized view in schemas exposed to PostgREST, potentially compromising user data security. + */ + authUsersExposed?: RuleConfiguration_for_Null; + /** + * Extension in Public: Detects extensions installed in the `public` schema. + */ + extensionInPublic?: RuleConfiguration_for_Null; + /** + * Extension Versions Outdated: Detects extensions that are not using the default (recommended) version. + */ + extensionVersionsOutdated?: RuleConfiguration_for_Null; + /** + * Foreign Key to Auth Unique Constraint: Detects user defined foreign keys to unique constraints in the auth schema. + */ + fkeyToAuthUnique?: RuleConfiguration_for_Null; + /** + * Foreign Table in API: Detects foreign tables that are accessible over APIs. Foreign tables do not respect row level security policies. + */ + foreignTableInApi?: RuleConfiguration_for_Null; + /** + * Function Search Path Mutable: Detects functions where the search_path parameter is not set. + */ + functionSearchPathMutable?: RuleConfiguration_for_Null; + /** + * Insecure Queue Exposed in API: Detects cases where an insecure Queue is exposed over Data APIs + */ + insecureQueueExposedInApi?: RuleConfiguration_for_Null; + /** + * Materialized View in API: Detects materialized views that are accessible over the Data APIs. + */ + materializedViewInApi?: RuleConfiguration_for_Null; + /** + * Policy Exists RLS Disabled: Detects cases where row level security (RLS) policies have been created, but RLS has not been enabled for the underlying table. 
+ */ + policyExistsRlsDisabled?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * RLS Disabled in Public: Detects cases where row level security (RLS) has not been enabled on tables in schemas exposed to PostgREST + */ + rlsDisabledInPublic?: RuleConfiguration_for_Null; + /** + * RLS Enabled No Policy: Detects cases where row level security (RLS) has been enabled on a table but no RLS policies have been created. + */ + rlsEnabledNoPolicy?: RuleConfiguration_for_Null; + /** + * RLS references user metadata: Detects when Supabase Auth user_metadata is referenced insecurely in a row level security (RLS) policy. + */ + rlsReferencesUserMetadata?: RuleConfiguration_for_Null; + /** + * Security Definer View: Detects views defined with the SECURITY DEFINER property. These views enforce Postgres permissions and row level security policies (RLS) of the view creator, rather than that of the querying user + */ + securityDefinerView?: RuleConfiguration_for_Null; + /** + * Unsupported reg types: Identifies columns using unsupported reg* types outside pg_catalog schema, which prevents database upgrades using pg_upgrade. 
+ */ + unsupportedRegTypes?: RuleConfiguration_for_Null; +} export type RuleConfiguration_for_Null = | RulePlainConfiguration | RuleWithOptions_for_Null; diff --git a/xtask/codegen/Cargo.toml b/xtask/codegen/Cargo.toml index c5e95ebe4..24898d319 100644 --- a/xtask/codegen/Cargo.toml +++ b/xtask/codegen/Cargo.toml @@ -16,9 +16,11 @@ pgls_analyse = { workspace = true } pgls_analyser = { workspace = true } pgls_diagnostics = { workspace = true } pgls_env = { workspace = true } +pgls_pglinter = { workspace = true } pgls_splinter = { workspace = true } pgls_workspace = { workspace = true, features = ["schema"] } proc-macro2 = { workspace = true, features = ["span-locations"] } pulldown-cmark = { version = "0.12.2" } quote = "1.0.36" +regex = "1.11" xtask = { path = '../', version = "0.0" } diff --git a/xtask/codegen/src/generate_configuration.rs b/xtask/codegen/src/generate_configuration.rs index 2824974e5..df3af8eec 100644 --- a/xtask/codegen/src/generate_configuration.rs +++ b/xtask/codegen/src/generate_configuration.rs @@ -14,11 +14,17 @@ use xtask::*; struct ToolConfig { name: &'static str, category: RuleCategory, + /// Whether this tool operates on files (vs database) + handles_files: bool, } impl ToolConfig { - const fn new(name: &'static str, category: RuleCategory) -> Self { - Self { name, category } + const fn new(name: &'static str, category: RuleCategory, handles_files: bool) -> Self { + Self { + name, + category, + handles_files, + } } /// Derived: Directory name under pgls_configuration/src/ @@ -72,10 +78,10 @@ impl ToolConfig { /// All supported tools const TOOLS: &[ToolConfig] = &[ - ToolConfig::new("linter", RuleCategory::Lint), - ToolConfig::new("assists", RuleCategory::Action), - ToolConfig::new("splinter", RuleCategory::Lint), - ToolConfig::new("pglinter", RuleCategory::Lint), + ToolConfig::new("linter", RuleCategory::Lint, true), + ToolConfig::new("assists", RuleCategory::Action, true), + ToolConfig::new("splinter", RuleCategory::Lint, false), // 
Database linter, doesn't handle files + ToolConfig::new("pglinter", RuleCategory::Lint, false), // Database linter via pglinter extension ]; /// Visitor that collects rules for a specific category @@ -115,6 +121,7 @@ impl RegistryVisitor for CategoryRulesVisitor { pub fn generate_rules_configuration(mode: Mode) -> Result<()> { generate_tool_configuration(mode, "linter")?; generate_tool_configuration(mode, "splinter")?; + generate_tool_configuration(mode, "pglinter")?; Ok(()) } @@ -134,8 +141,8 @@ pub fn generate_tool_configuration(mode: Mode, tool_name: &str) -> Result<()> { match tool.name { "linter" => pgls_analyser::visit_registry(&mut visitor), "splinter" => pgls_splinter::registry::visit_registry(&mut visitor), + "pglinter" => pgls_pglinter::registry::visit_registry(&mut visitor), "assists" => unimplemented!("Assists rules not yet implemented"), - "pglinter" => unimplemented!("PGLinter rules not yet implemented"), _ => unreachable!(), } @@ -172,12 +179,44 @@ fn generate_lint_mod_file(tool: &ToolConfig) -> String { let generated_file = tool.generated_file().trim_end_matches(".rs"); let generated_file_ident = Ident::new(generated_file, Span::call_site()); + // Only file-based tools need ignore/include fields + let handles_files = tool.handles_files; + + let file_fields = if handles_files { + quote! { + /// A list of Unix shell style patterns. The linter will ignore files/folders that will match these patterns. + #[partial(bpaf(hide))] + pub ignore: StringSet, + + /// A list of Unix shell style patterns. The linter will include files/folders that will match these patterns. + #[partial(bpaf(hide))] + pub include: StringSet, + } + } else { + quote! {} + }; + + let file_defaults = if handles_files { + quote! { + ignore: Default::default(), + include: Default::default(), + } + } else { + quote! {} + }; + + let string_set_import = if handles_files { + quote! { use biome_deserialize::StringSet; } + } else { + quote! {} + }; + let content = quote! { //! 
Generated file, do not edit by hand, see `xtask/codegen` mod #generated_file_ident; - use biome_deserialize::StringSet; + #string_set_import use biome_deserialize_macros::{Merge, Partial}; use bpaf::Bpaf; pub use #generated_file_ident::*; @@ -196,13 +235,7 @@ fn generate_lint_mod_file(tool: &ToolConfig) -> String { #[partial(bpaf(pure(Default::default()), optional, hide))] pub rules: Rules, - /// A list of Unix shell style patterns. The linter will ignore files/folders that will match these patterns. - #[partial(bpaf(hide))] - pub ignore: StringSet, - - /// A list of Unix shell style patterns. The linter will include files/folders that will match these patterns. - #[partial(bpaf(hide))] - pub include: StringSet, + #file_fields } impl #config_struct { @@ -216,8 +249,7 @@ fn generate_lint_mod_file(tool: &ToolConfig) -> String { Self { enabled: true, rules: Default::default(), - ignore: Default::default(), - include: Default::default(), + #file_defaults } } } @@ -288,6 +320,10 @@ fn generate_lint_rules_file( } let category_prefix = tool.category_prefix(); + // Schema name for the Rules struct (e.g., "LinterRules", "SplinterRules") + let rules_schema_name = format!("{}Rules", to_capitalized(tool.name)); + let rules_schema_name_lit = Literal::string(&rules_schema_name); + let rules_struct_content = quote! { //! Generated file, do not edit by hand, see `xtask/codegen` @@ -328,6 +364,7 @@ fn generate_lint_rules_file( #[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] #[cfg_attr(feature = "schema", derive(JsonSchema))] + #[cfg_attr(feature = "schema", schemars(rename = #rules_schema_name_lit))] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct Rules { /// It enables the lint rules recommended by Postgres Language Server. `true` by default. 
@@ -496,9 +533,9 @@ fn generate_lint_group_struct( #rule }); - // For splinter rules, use () as options since they don't have configurable options + // For splinter/pglinter rules, use () as options since they don't have configurable options // For linter rules, use pgls_analyser::options::#rule_name - let rule_option_type = if tool_name == "splinter" { + let rule_option_type = if tool_name == "splinter" || tool_name == "pglinter" { quote! { () } } else { quote! { pgls_analyser::options::#rule_name } @@ -657,6 +694,7 @@ fn generate_lint_group_struct( } /// Extract the first paragraph from markdown documentation as a summary +/// Stops at the first heading (## etc) or end of first paragraph fn extract_summary_from_docs(docs: &str) -> String { let mut summary = String::new(); let parser = Parser::new(docs); @@ -677,6 +715,16 @@ fn extract_summary_from_docs(docs: &str) -> String { Event::End(TagEnd::Paragraph) => { break; } + // Stop at H2+ headings (subsections) - H1 is the title + Event::Start(Tag::Heading { level, .. 
}) + if level != pulldown_cmark::HeadingLevel::H1 => + { + break; + } + // Add separator after H1 heading ends + Event::End(TagEnd::Heading(_)) => { + summary.push_str(": "); + } Event::Start(tag) => match tag { Tag::Strong | Tag::Paragraph => continue, _ => { diff --git a/xtask/codegen/src/generate_pglinter.rs b/xtask/codegen/src/generate_pglinter.rs new file mode 100644 index 000000000..c6fa8b48e --- /dev/null +++ b/xtask/codegen/src/generate_pglinter.rs @@ -0,0 +1,687 @@ +use anyhow::{Context, Result}; +use biome_string_case::Case; +use quote::{format_ident, quote}; +use regex::Regex; +use std::collections::BTreeMap; +use std::path::Path; +use xtask::{glue::fs2, project_root, Mode}; + +use crate::update; + +/// Metadata extracted from rules.sql INSERT statements +#[derive(Debug, Clone)] +struct PglinterRuleMeta { + /// Rule name in PascalCase (e.g., "HowManyTableWithoutPrimaryKey") + name: String, + /// Rule name in snake_case (e.g., "how_many_table_without_primary_key") + snake_name: String, + /// Rule name in camelCase (e.g., "howManyTableWithoutPrimaryKey") + camel_name: String, + /// Rule code (e.g., "B001") + code: String, + /// Scope: BASE, SCHEMA, or CLUSTER + scope: String, + /// Description of the rule + description: String, + /// Message template with placeholders + message: String, + /// Suggested fixes + fixes: Vec, + /// Warning threshold percentage + warning_level: i32, + /// Error threshold percentage + error_level: i32, +} + +/// Parse pglinter rules from rules.sql and generate Rust code +pub fn generate_pglinter() -> Result<()> { + let rules_sql_path = project_root().join("crates/pgls_pglinter/vendor/sql/rules.sql"); + + if !rules_sql_path.exists() { + anyhow::bail!( + "Vendor files not found at crates/pgls_pglinter/vendor/sql/rules.sql. Run 'cargo build -p pgls_pglinter' first to download them." 
+ ); + } + + let sql_content = fs2::read_to_string(&rules_sql_path)?; + let rules = parse_rules_sql(&sql_content)?; + + // Generate rule files + generate_rule_trait()?; + generate_rule_files(&rules)?; + generate_registry(&rules)?; + update_categories_file(&rules)?; + + Ok(()) +} + +/// Parse INSERT statements from rules.sql to extract rule metadata +fn parse_rules_sql(content: &str) -> Result> { + let mut rules = BTreeMap::new(); + + // Normalize the content: remove newlines within parentheses to make regex easier + // This handles multi-line ARRAY declarations + let normalized = normalize_sql_values(content); + + // Use regex to find value tuples + // Pattern: ('Name', 'CODE', num, num, 'SCOPE', 'desc', 'msg', ARRAY[...]) + let value_pattern = Regex::new( + r#"\(\s*'([^']+)',\s*'([^']+)',\s*(\d+),\s*(\d+),\s*'([^']+)',\s*'([^']+)',\s*'([^']+)',\s*ARRAY\s*\[(.*?)\]\s*\)"#, + )?; + + for caps in value_pattern.captures_iter(&normalized) { + let name = caps.get(1).unwrap().as_str().to_string(); + let code = caps.get(2).unwrap().as_str().to_string(); + let warning_level: i32 = caps.get(3).unwrap().as_str().parse()?; + let error_level: i32 = caps.get(4).unwrap().as_str().parse()?; + let scope = caps.get(5).unwrap().as_str().to_string(); + let description = caps + .get(6) + .unwrap() + .as_str() + .replace("''", "'") // Unescape single quotes + .to_string(); + let message = caps.get(7).unwrap().as_str().to_string(); + let fixes_str = caps.get(8).unwrap().as_str(); + + // Parse fixes array + let fixes: Vec = parse_fixes_array(fixes_str); + + let snake_name = Case::Snake.convert(&name); + let camel_name = to_camel_case(&name); + + let meta = PglinterRuleMeta { + name, + snake_name: snake_name.clone(), + camel_name, + code, + scope, + description, + message, + fixes, + warning_level, + error_level, + }; + + rules.insert(snake_name, meta); + } + + if rules.is_empty() { + anyhow::bail!("No rules found in rules.sql. 
Check the file format."); + } + + Ok(rules) +} + +/// Normalize SQL content by joining lines within value tuples +fn normalize_sql_values(content: &str) -> String { + let mut result = String::new(); + let mut in_value = false; + let mut paren_depth = 0; + + for c in content.chars() { + match c { + '(' => { + paren_depth += 1; + in_value = true; + result.push(c); + } + ')' => { + paren_depth -= 1; + if paren_depth == 0 { + in_value = false; + } + result.push(c); + } + '\n' | '\r' if in_value => { + result.push(' '); // Replace newlines with spaces inside values + } + _ => result.push(c), + } + } + + result +} + +/// Parse ARRAY['fix1', 'fix2'] into Vec +fn parse_fixes_array(s: &str) -> Vec { + let fix_pattern = Regex::new(r#"'([^']+)'"#).unwrap(); + fix_pattern + .captures_iter(s) + .map(|cap| cap.get(1).unwrap().as_str().to_string()) + .collect() +} + +/// Convert PascalCase to camelCase +fn to_camel_case(s: &str) -> String { + let mut chars = s.chars(); + match chars.next() { + None => String::new(), + Some(first) => first.to_lowercase().collect::() + chars.as_str(), + } +} + +/// Map scope to category directory name +fn scope_to_category(scope: &str) -> &'static str { + match scope { + "BASE" => "base", + "SCHEMA" => "schema", + "CLUSTER" => "cluster", + _ => "base", + } +} + +/// Generate src/rule.rs with PglinterRule trait +fn generate_rule_trait() -> Result<()> { + let rule_path = project_root().join("crates/pgls_pglinter/src/rule.rs"); + + let content = quote! { + //! 
Generated file, do not edit by hand, see `xtask/codegen` + + use pgls_analyse::RuleMeta; + + /// Trait for pglinter (database-level) rules + /// + /// Pglinter rules are different from linter rules: + /// - They execute SQL queries against the database via pglinter extension + /// - They don't have AST-based execution + /// - Rule logic is in the pglinter Postgres extension + /// - Threshold configuration (warning/error levels) is handled by pglinter extension + pub trait PglinterRule: RuleMeta { + /// Rule code (e.g., "B001", "S001", "C001") + const CODE: &'static str; + + /// Rule scope (BASE, SCHEMA, or CLUSTER) + const SCOPE: &'static str; + + /// Description of what the rule detects + const DESCRIPTION: &'static str; + + /// Suggested fixes for violations + const FIXES: &'static [&'static str]; + } + }; + + let formatted = xtask::reformat(content)?; + update(&rule_path, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate rule files in src/rules/{category}/{rule_name}.rs +fn generate_rule_files(rules: &BTreeMap) -> Result<()> { + let rules_dir = project_root().join("crates/pgls_pglinter/src/rules"); + + // Group rules by scope/category + let mut rules_by_category: BTreeMap> = BTreeMap::new(); + for rule in rules.values() { + let category = scope_to_category(&rule.scope).to_string(); + rules_by_category.entry(category).or_default().push(rule); + } + + // Generate category directories and files + for (category, category_rules) in &rules_by_category { + let category_dir = rules_dir.join(category); + fs2::create_dir_all(&category_dir)?; + + // Generate individual rule files + for rule in category_rules { + generate_rule_file(&category_dir, rule)?; + } + + // Generate category mod.rs + generate_category_mod(&category_dir, category, category_rules)?; + } + + // Generate main rules/mod.rs + generate_rules_mod(&rules_dir, &rules_by_category)?; + + Ok(()) +} + +/// Generate individual rule file +fn generate_rule_file(category_dir: &Path, rule: 
&PglinterRuleMeta) -> Result<()> { + let rule_file = category_dir.join(format!("{}.rs", rule.snake_name)); + + let struct_name = format_ident!("{}", rule.name); + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let warning_level = rule.warning_level; + let error_level = rule.error_level; + let category = scope_to_category(&rule.scope); + + // Create fixes as static slice + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + // Build doc string + let doc_string = format!( + r#"# {} ({}) + +{} + +## Configuration + +Enable or disable this rule in your configuration: + +```json +{{ + "pglinter": {{ + "rules": {{ + "{}": {{ + "{}": "warn" + }} + }} + }} +}} +``` + +## Thresholds + +- Warning level: {}% +- Error level: {}% + +## Fixes + +{} + +## Documentation + +See: "#, + rule.name, + code, + description, + category, + camel_name, + warning_level, + error_level, + rule.fixes + .iter() + .map(|f| format!("- {f}")) + .collect::>() + .join("\n"), + code.to_lowercase(), + ); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + use crate::rule::PglinterRule; + + ::pgls_analyse::declare_rule! 
{ + #[doc = #doc_string] + pub #struct_name { + version: "1.0.0", + name: #camel_name, + severity: pgls_diagnostics::Severity::Warning, + recommended: true, + } + } + + impl PglinterRule for #struct_name { + const CODE: &'static str = #code; + const SCOPE: &'static str = #scope; + const DESCRIPTION: &'static str = #description; + const FIXES: &'static [&'static str] = &[#(#fixes),*]; + } + }; + + let formatted = xtask::reformat(content)?; + update(&rule_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate category mod.rs that exports all rules +fn generate_category_mod( + category_dir: &Path, + category: &str, + rules: &[&PglinterRuleMeta], +) -> Result<()> { + let mod_file = category_dir.join("mod.rs"); + + let category_title = Case::Pascal.convert(category); + let category_struct = format_ident!("{}", category_title); + + // Generate mod declarations + let mod_names: Vec<_> = rules + .iter() + .map(|r| format_ident!("{}", r.snake_name)) + .collect(); + + // Generate rule paths for declare_lint_group! + let rule_paths: Vec<_> = rules + .iter() + .map(|r| { + let mod_name = format_ident!("{}", r.snake_name); + let struct_name = format_ident!("{}", r.name); + quote! { self::#mod_name::#struct_name } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + #( pub mod #mod_names; )* + + ::pgls_analyse::declare_lint_group! { + pub #category_struct { + name: #category, + rules: [ + #( #rule_paths, )* + ] + } + } + }; + + let formatted = xtask::reformat(content)?; + update(&mod_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate main rules/mod.rs +fn generate_rules_mod( + rules_dir: &Path, + rules_by_category: &BTreeMap>, +) -> Result<()> { + let mod_file = rules_dir.join("mod.rs"); + + let category_mods: Vec<_> = rules_by_category + .keys() + .map(|cat| { + let mod_name = format_ident!("{}", cat); + quote! 
{ pub mod #mod_name; } + }) + .collect(); + + // Generate group paths for declare_category! + let group_paths: Vec<_> = rules_by_category + .keys() + .map(|cat| { + let mod_name = format_ident!("{}", cat); + let group_name = format_ident!("{}", Case::Pascal.convert(cat)); + quote! { self::#mod_name::#group_name } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + #( #category_mods )* + + ::pgls_analyse::declare_category! { + pub PgLinter { + kind: Lint, + groups: [ + #( #group_paths, )* + ] + } + } + }; + + let formatted = xtask::reformat(content)?; + update(&mod_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate src/registry.rs with visit_registry() and get_rule_category() +fn generate_registry(rules: &BTreeMap) -> Result<()> { + let registry_path = project_root().join("crates/pgls_pglinter/src/registry.rs"); + + // Generate match arms for rule code lookup (camelCase → code) + let code_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + quote! { + #camel_name => Some(#code) + } + }) + .collect(); + + // Generate match arms for category lookup (code → &'static Category) + let category_arms: Vec<_> = rules + .values() + .map(|rule| { + let code = &rule.code; + let category = scope_to_category(&rule.scope); + let camel_name = &rule.camel_name; + let category_path = format!("pglinter/{category}/{camel_name}"); + + quote! { + #code => Some(::pgls_diagnostics::category!(#category_path)) + } + }) + .collect(); + + // Generate match arms for rule metadata lookup by name + let metadata_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + quote! 
{ + #camel_name => Some(RuleMetadata { + code: #code, + name: #camel_name, + scope: #scope, + description: #description, + fixes: &[#(#fixes),*], + }) + } + }) + .collect(); + + // Generate match arms for rule metadata lookup by code + let metadata_by_code_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + quote! { + #code => Some(RuleMetadata { + code: #code, + name: #camel_name, + scope: #scope, + description: #description, + fixes: &[#(#fixes),*], + }) + } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + use pgls_analyse::RegistryVisitor; + use pgls_diagnostics::Category; + + /// Metadata for a pglinter rule + #[derive(Debug, Clone, Copy)] + pub struct RuleMetadata { + /// Rule code (e.g., "B001") + pub code: &'static str, + /// Rule name in camelCase + pub name: &'static str, + /// Rule scope (BASE, SCHEMA, CLUSTER) + pub scope: &'static str, + /// Description of what the rule detects + pub description: &'static str, + /// Suggested fixes + pub fixes: &'static [&'static str], + } + + /// Visit all pglinter rules using the visitor pattern + pub fn visit_registry(registry: &mut V) { + registry.record_category::(); + } + + /// Get the pglinter rule code from the camelCase name + pub fn get_rule_code(name: &str) -> Option<&'static str> { + match name { + #( #code_arms, )* + _ => None, + } + } + + /// Get the diagnostic category for a rule code + pub fn get_rule_category(code: &str) -> Option<&'static Category> { + match code { + #( #category_arms, )* + _ => None, + } + } + + /// Get rule metadata by name (camelCase) + pub fn get_rule_metadata(name: &str) -> Option { + match name { + #( #metadata_arms, )* + _ => None, + } + } + + /// Get rule metadata by code (e.g., "B001", "S001", "C001") + pub fn 
get_rule_metadata_by_code(code: &str) -> Option { + match code { + #( #metadata_by_code_arms, )* + _ => None, + } + } + }; + + let formatted = xtask::reformat(content)?; + update(®istry_path, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Update the categories.rs file with pglinter rules +fn update_categories_file(rules: &BTreeMap) -> Result<()> { + let categories_path = + project_root().join("crates/pgls_diagnostics_categories/src/categories.rs"); + + let mut content = fs2::read_to_string(&categories_path)?; + + // Generate pglinter rule entries grouped by category + let mut pglinter_rules: Vec<(String, String)> = rules + .values() + .map(|rule| { + let category = scope_to_category(&rule.scope); + let url = format!( + "https://github.com/pmpetit/pglinter#{}", + rule.code.to_lowercase() + ); + + ( + category.to_string(), + format!( + " \"pglinter/{}/{}\": \"{}\",", + category, rule.camel_name, url + ), + ) + }) + .collect(); + + // Sort by category, then by entry + pglinter_rules.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1))); + + // Add meta diagnostics at the start + let mut all_entries = vec![ + " // Meta diagnostics".to_string(), + " \"pglinter/extensionNotInstalled\": \"Install the pglinter extension with: CREATE EXTENSION pglinter\",".to_string(), + " \"pglinter/ruleDisabledInExtension\": \"Enable the rule in the extension with: UPDATE pglinter.rules SET enable = true WHERE code = ''\",".to_string(), + ]; + + // Add rule categories + let mut current_category = String::new(); + for (category, entry) in &pglinter_rules { + if category != ¤t_category { + current_category = category.clone(); + all_entries.push(format!( + " // {} rules ({}-series)", + Case::Pascal.convert(category), + match category.as_str() { + "base" => "B", + "schema" => "S", + "cluster" => "C", + _ => "?", + } + )); + } + all_entries.push(entry.clone()); + } + + let pglinter_entries = all_entries.join("\n"); + + // Replace content between pglinter rules markers + let 
rules_start = "// pglinter rules start"; + let rules_end = "// pglinter rules end"; + + content = replace_between_markers( + &content, + rules_start, + rules_end, + &format!("\n{pglinter_entries}\n "), + )?; + + // Generate pglinter group entries + let mut categories: Vec = pglinter_rules.iter().map(|(cat, _)| cat.clone()).collect(); + categories.sort(); + categories.dedup(); + + let mut group_entries = vec![" \"pglinter\",".to_string()]; + for category in categories { + group_entries.push(format!(" \"pglinter/{category}\",")); + } + let groups_content = group_entries.join("\n"); + + // Replace content between pglinter groups markers + let groups_start = "// Pglinter groups start"; + let groups_end = "// Pglinter groups end"; + + content = replace_between_markers( + &content, + groups_start, + groups_end, + &format!("\n{groups_content}\n "), + )?; + + fs2::write(categories_path, content)?; + + Ok(()) +} + +/// Replace content between two markers +fn replace_between_markers( + content: &str, + start_marker: &str, + end_marker: &str, + new_content: &str, +) -> Result { + let start_pos = content + .find(start_marker) + .with_context(|| format!("Could not find '{start_marker}' marker"))?; + + let end_pos = content + .find(end_marker) + .with_context(|| format!("Could not find '{end_marker}' marker"))?; + + let mut result = String::new(); + result.push_str(&content[..start_pos + start_marker.len()]); + result.push_str(new_content); + result.push_str(&content[end_pos..]); + + Ok(result) +} diff --git a/xtask/codegen/src/generate_splinter.rs b/xtask/codegen/src/generate_splinter.rs index 6dc94bd0e..198169850 100644 --- a/xtask/codegen/src/generate_splinter.rs +++ b/xtask/codegen/src/generate_splinter.rs @@ -90,8 +90,8 @@ fn extract_metadata_from_sql(sql_path: &Path, category: &str) -> Result Result // Build comprehensive documentation let requires_supabase_note = if requires_supabase { - "\n/// \n/// **Note:** This rule requires Supabase roles (`anon`, `authenticated`, 
`service_role`). \n/// It will be automatically skipped if these roles don't exist in your database.".to_string() + "\n\n**Note:** This rule requires Supabase roles (`anon`, `authenticated`, `service_role`). It will be automatically skipped if these roles don't exist in your database.".to_string() } else { String::new() }; + // Build doc string as proper markdown (no /// prefixes - those are for source code comments) let doc_string = format!( - r#"/// # {title} -/// -/// {description}{requires_supabase_note} -/// -/// ## SQL Query -/// -/// ```sql -{sql_query_commented} -/// ``` -/// -/// ## Configuration -/// -/// Enable or disable this rule in your configuration: -/// -/// ```json -/// {{ -/// "splinter": {{ -/// "rules": {{ -/// "{category_lower}": {{ -/// "{name}": "warn" -/// }} -/// }} -/// }} -/// }} -/// ``` -/// -/// ## Remediation -/// -/// See: <{remediation}>"#, - title = title, - description = description, - requires_supabase_note = requires_supabase_note, - sql_query_commented = sql_query - .lines() - .map(|line| format!("/// {line}")) - .collect::>() - .join("\n"), - category_lower = category_lower, - name = name, - remediation = remediation, + r#"# {title} + +{description}{requires_supabase_note} + +## SQL Query + +```sql +{sql_query} +``` + +## Configuration + +Enable or disable this rule in your configuration: + +```json +{{ + "splinter": {{ + "rules": {{ + "{category_lower}": {{ + "{name}": "warn" + }} + }} + }} +}} +``` + +## Remediation + +See: <{remediation}>"#, ); let content = quote! 
{ @@ -326,6 +316,7 @@ fn generate_rule_file(category_dir: &Path, metadata: &SqlRuleMetadata) -> Result version: "1.0.0", name: #name, severity: #severity, + recommended: true, } } diff --git a/xtask/codegen/src/lib.rs b/xtask/codegen/src/lib.rs index 3ed82ace1..268439e05 100644 --- a/xtask/codegen/src/lib.rs +++ b/xtask/codegen/src/lib.rs @@ -5,6 +5,7 @@ mod generate_bindings; mod generate_configuration; mod generate_crate; mod generate_new_analyser_rule; +mod generate_pglinter; mod generate_splinter; pub use self::generate_analyser::generate_analyser; @@ -12,6 +13,7 @@ pub use self::generate_bindings::generate_bindings; pub use self::generate_configuration::{generate_rules_configuration, generate_tool_configuration}; pub use self::generate_crate::generate_crate; pub use self::generate_new_analyser_rule::generate_new_analyser_rule; +pub use self::generate_pglinter::generate_pglinter; pub use self::generate_splinter::generate_splinter; use bpaf::Bpaf; use generate_new_analyser_rule::Category; @@ -95,4 +97,7 @@ pub enum TaskCommand { /// Generate splinter categories from the SQL file #[bpaf(command)] Splinter, + /// Generate pglinter rules from pglinter_repo/sql/rules.sql + #[bpaf(command)] + Pglinter, } diff --git a/xtask/codegen/src/main.rs b/xtask/codegen/src/main.rs index 43d11b44c..ca425db00 100644 --- a/xtask/codegen/src/main.rs +++ b/xtask/codegen/src/main.rs @@ -3,7 +3,7 @@ use xtask::{project_root, pushd, Result}; use xtask_codegen::{ generate_analyser, generate_bindings, generate_crate, generate_new_analyser_rule, - generate_rules_configuration, generate_splinter, task_command, TaskCommand, + generate_pglinter, generate_rules_configuration, generate_splinter, task_command, TaskCommand, }; fn main() -> Result<()> { @@ -34,6 +34,9 @@ fn main() -> Result<()> { TaskCommand::Splinter => { generate_splinter()?; } + TaskCommand::Pglinter => { + generate_pglinter()?; + } } Ok(())