From c78f95c08b8fbd14228860d225e5cf6965003983 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 13:40:53 +0100 Subject: [PATCH 01/10] feat(dblint): add splinter --- .github/actions/setup-postgres/action.yml | 37 +- .gitignore | 1 + Cargo.lock | 16 +- Cargo.toml | 2 +- Dockerfile | 17 +- crates/pgls_configuration/src/lib.rs | 12 + crates/pgls_configuration/src/pglinter/mod.rs | 41 + .../pgls_configuration/src/pglinter/rules.rs | 916 ++++++++++++++++++ .../pgls_configuration/src/rules/selector.rs | 24 +- .../src/categories.rs | 36 + crates/pgls_pglinter/Cargo.toml | 29 + crates/pgls_pglinter/src/cache.rs | 58 ++ crates/pgls_pglinter/src/diagnostics.rs | 183 ++++ crates/pgls_pglinter/src/lib.rs | 157 +++ crates/pgls_pglinter/src/registry.rs | 463 +++++++++ crates/pgls_pglinter/src/rule.rs | 21 + .../composite_primary_key_too_many_columns.rs | 15 + .../base/how_many_objects_with_uppercase.rs | 12 + .../src/rules/base/how_many_redudant_index.rs | 13 + .../how_many_table_without_index_on_fk.rs | 12 + .../how_many_table_without_primary_key.rs | 12 + .../base/how_many_tables_never_selected.rs | 12 + .../base/how_many_tables_with_fk_mismatch.rs | 15 + .../how_many_tables_with_fk_outside_schema.rs | 15 + .../how_many_tables_with_reserved_keywords.rs | 15 + .../base/how_many_tables_with_same_trigger.rs | 15 + .../src/rules/base/how_many_unused_index.rs | 12 + crates/pgls_pglinter/src/rules/base/mod.rs | 16 + .../base/several_table_owner_in_schema.rs | 12 + crates/pgls_pglinter/src/rules/cluster/mod.rs | 7 + .../cluster/password_encryption_is_md5.rs | 15 + ...hod_trust_or_password_should_not_exists.rs | 11 + ...ies_with_method_trust_should_not_exists.rs | 11 + crates/pgls_pglinter/src/rules/mod.rs | 7 + crates/pgls_pglinter/src/rules/schema/mod.rs | 9 + .../schema/owner_schema_is_internal_role.rs | 11 + .../schema_owner_do_not_match_table_owner.rs | 12 + .../schema_prefixed_or_suffixed_with_envt.rs | 13 + .../schema_with_default_role_not_granted.rs | 14 + 
.../rules/schema/unsecured_public_schema.rs | 11 + crates/pgls_pglinter/src/sarif.rs | 172 ++++ crates/pgls_pglinter/tests/diagnostics.rs | 267 +++++ crates/pgls_workspace/src/settings.rs | 40 + docs/schema.json | 368 +++++++ justfile | 1 + .../backend-jsonrpc/src/workspace.ts | 174 +++- .../backend-jsonrpc/src/workspace.ts | 174 +++- xtask/codegen/Cargo.toml | 2 + xtask/codegen/src/generate_configuration.rs | 8 +- xtask/codegen/src/generate_pglinter.rs | 687 +++++++++++++ xtask/codegen/src/lib.rs | 5 + xtask/codegen/src/main.rs | 5 +- 52 files changed, 4192 insertions(+), 21 deletions(-) create mode 100644 crates/pgls_configuration/src/pglinter/mod.rs create mode 100644 crates/pgls_configuration/src/pglinter/rules.rs create mode 100644 crates/pgls_pglinter/Cargo.toml create mode 100644 crates/pgls_pglinter/src/cache.rs create mode 100644 crates/pgls_pglinter/src/diagnostics.rs create mode 100644 crates/pgls_pglinter/src/lib.rs create mode 100644 crates/pgls_pglinter/src/registry.rs create mode 100644 crates/pgls_pglinter/src/rule.rs create mode 100644 crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs create mode 100644 crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs create mode 100644 
crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs create mode 100644 crates/pgls_pglinter/src/rules/base/mod.rs create mode 100644 crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs create mode 100644 crates/pgls_pglinter/src/rules/cluster/mod.rs create mode 100644 crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs create mode 100644 crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs create mode 100644 crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs create mode 100644 crates/pgls_pglinter/src/rules/mod.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/mod.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs create mode 100644 crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs create mode 100644 crates/pgls_pglinter/src/sarif.rs create mode 100644 crates/pgls_pglinter/tests/diagnostics.rs create mode 100644 xtask/codegen/src/generate_pglinter.rs diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index a07fdfbcd..8615c84a3 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -53,20 +53,47 @@ runs: echo "Extension library files:" ls -la "$(pg_config --pkglibdir)/" | grep plpgsql || echo "No plpgsql_check library found" - # Install the pglpgsql_check extension on macOS (Part 2) - - name: Create extension in database + # Install the pglinter extension on macOS + - name: Install and compile pglinter + if: runner.os == 'macOS' + shell: bash + run: | + # First, ensure we're using 
the same PostgreSQL that the action installed + export PATH="$(pg_config --bindir):$PATH" + + # Clone and build pglinter + git clone https://github.com/pmpetit/pglinter.git + cd pglinter + + # Clean and compile + make USE_PGXS=1 clean + make USE_PGXS=1 all + + # Install (may need sudo depending on permissions) + sudo make USE_PGXS=1 install + + # Verify installation + echo "Extension control files:" + ls -la "$(pg_config --sharedir)/extension/" | grep pglinter || echo "No pglinter found" + + echo "Extension library files:" + ls -la "$(pg_config --pkglibdir)/" | grep pglinter || echo "No pglinter library found" + + # Create extensions in database on macOS + - name: Create extensions in database if: runner.os == 'macOS' shell: bash env: PGSERVICE: ${{ steps.postgres.outputs.service-name }} run: | psql -c "CREATE EXTENSION plpgsql_check;" + psql -c "CREATE EXTENSION pglinter;" # Verify installation - psql -c "SELECT extname, extversion FROM pg_extension WHERE extname = 'plpgsql_check';" + psql -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" - # For Linux, use custom Docker image with plpgsql_check - - name: Build and start PostgreSQL with plpgsql_check + # For Linux, use custom Docker image with plpgsql_check and pglinter + - name: Build and start PostgreSQL with extensions if: runner.os == 'Linux' shell: bash run: | diff --git a/.gitignore b/.gitignore index 4c22f22d6..102a5eb7e 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,4 @@ crates/pgt_treesitter_grammar/src/parser.c.codex-session-id .codex-session-id site/ +pglinter_repo/ diff --git a/Cargo.lock b/Cargo.lock index e9cace6a4..1fc0daaff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2752,7 +2752,6 @@ dependencies = [ "pgls_console", "pgls_diagnostics", "pgls_env", - "pgls_matcher", "pgls_text_size", "rustc-hash 2.1.0", "schemars", @@ -2923,12 +2922,20 @@ dependencies = [ ] [[package]] -name = "pgls_matcher" +name = "pgls_pglinter" version = "0.0.0" 
dependencies = [ + "insta", + "pgls_analyse", "pgls_console", "pgls_diagnostics", + "pgls_diagnostics_categories", + "pgls_schema_cache", + "pgls_test_utils", "rustc-hash 2.1.0", + "serde", + "serde_json", + "sqlx", ] [[package]] @@ -3010,10 +3017,8 @@ version = "0.0.0" dependencies = [ "insta", "pgls_analyse", - "pgls_configuration", "pgls_console", "pgls_diagnostics", - "pgls_matcher", "pgls_schema_cache", "pgls_test_utils", "serde", @@ -3172,7 +3177,6 @@ dependencies = [ "pgls_fs", "pgls_hover", "pgls_lexer", - "pgls_matcher", "pgls_plpgsql_check", "pgls_query", "pgls_query_ext", @@ -5633,11 +5637,13 @@ dependencies = [ "pgls_analyser", "pgls_diagnostics", "pgls_env", + "pgls_pglinter", "pgls_splinter", "pgls_workspace", "proc-macro2", "pulldown-cmark", "quote", + "regex", "xtask", ] diff --git a/Cargo.toml b/Cargo.toml index 46039494c..32c3c8773 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,7 +77,7 @@ pgls_lexer = { path = "./crates/pgls_lexer", version = "0.0.0" pgls_lexer_codegen = { path = "./crates/pgls_lexer_codegen", version = "0.0.0" } pgls_lsp = { path = "./crates/pgls_lsp", version = "0.0.0" } pgls_markup = { path = "./crates/pgls_markup", version = "0.0.0" } -pgls_matcher = { path = "./crates/pgls_matcher", version = "0.0.0" } +pgls_pglinter = { path = "./crates/pgls_pglinter", version = "0.0.0" } pgls_plpgsql_check = { path = "./crates/pgls_plpgsql_check", version = "0.0.0" } pgls_query = { path = "./crates/pgls_query", version = "0.0.0" } pgls_query_ext = { path = "./crates/pgls_query_ext", version = "0.0.0" } diff --git a/Dockerfile b/Dockerfile index 10353bb27..61cda3a26 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,25 @@ FROM postgres:15 -# Install build dependencies +# Install build dependencies and extensions RUN apt-get update && \ apt-get install -y postgresql-server-dev-15 gcc make git && \ + # Install plpgsql_check cd /tmp && \ git clone https://github.com/okbob/plpgsql_check.git && \ cd plpgsql_check && \ make && \ make install && 
\ + # Install pglinter + cd /tmp && \ + git clone https://github.com/pmpetit/pglinter.git && \ + cd pglinter && \ + make && \ + make install && \ + # Cleanup apt-get remove -y postgresql-server-dev-15 gcc make git && \ apt-get autoremove -y && \ - rm -rf /tmp/plpgsql_check /var/lib/apt/lists/* + rm -rf /tmp/plpgsql_check /tmp/pglinter /var/lib/apt/lists/* -# Add initialization script directly -RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file +# Add initialization script for extensions +RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS pglinter;" >> /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file diff --git a/crates/pgls_configuration/src/lib.rs b/crates/pgls_configuration/src/lib.rs index 8d14a331b..3f2daad33 100644 --- a/crates/pgls_configuration/src/lib.rs +++ b/crates/pgls_configuration/src/lib.rs @@ -7,6 +7,7 @@ pub mod diagnostics; pub mod files; pub mod linter; pub mod migrations; +pub mod pglinter; pub mod plpgsql_check; pub mod rules; pub mod splinter; @@ -32,6 +33,9 @@ pub use linter::{ use migrations::{ MigrationsConfiguration, PartialMigrationsConfiguration, partial_migrations_configuration, }; +use pglinter::{ + PartialPglinterConfiguration, PglinterConfiguration, partial_pglinter_configuration, +}; use pgls_env::PGLS_WEBSITE; use plpgsql_check::{ PartialPlPgSqlCheckConfiguration, PlPgSqlCheckConfiguration, @@ -93,6 +97,10 @@ pub struct Configuration { #[partial(type, bpaf(external(partial_splinter_configuration), optional))] pub splinter: SplinterConfiguration, + /// The configuration for pglinter + #[partial(type, bpaf(external(partial_pglinter_configuration), optional))] + pub pglinter: PglinterConfiguration, + /// The configuration for type checking #[partial(type, bpaf(external(partial_typecheck_configuration), optional))] 
pub typecheck: TypecheckConfiguration, @@ -138,6 +146,10 @@ impl PartialConfiguration { enabled: Some(true), ..Default::default() }), + pglinter: Some(PartialPglinterConfiguration { + enabled: Some(false), // Disabled by default since pglinter extension might not be installed + ..Default::default() + }), typecheck: Some(PartialTypecheckConfiguration { ..Default::default() }), diff --git a/crates/pgls_configuration/src/pglinter/mod.rs b/crates/pgls_configuration/src/pglinter/mod.rs new file mode 100644 index 000000000..f676abf1f --- /dev/null +++ b/crates/pgls_configuration/src/pglinter/mod.rs @@ -0,0 +1,41 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +mod rules; +use biome_deserialize_macros::{Merge, Partial}; +use bpaf::Bpaf; +pub use rules::*; +use serde::{Deserialize, Serialize}; +#[derive(Clone, Debug, Deserialize, Eq, Partial, PartialEq, Serialize)] +#[partial(derive(Bpaf, Clone, Eq, Merge, PartialEq))] +#[partial(cfg_attr(feature = "schema", derive(schemars::JsonSchema)))] +#[partial(serde(rename_all = "camelCase", default, deny_unknown_fields))] +pub struct PglinterConfiguration { + #[doc = r" if `false`, it disables the feature and the linter won't be executed. 
`true` by default"] + #[partial(bpaf(hide))] + pub enabled: bool, + #[doc = r" List of rules"] + #[partial(bpaf(pure(Default::default()), optional, hide))] + pub rules: Rules, +} +impl PglinterConfiguration { + pub const fn is_disabled(&self) -> bool { + !self.enabled + } +} +impl Default for PglinterConfiguration { + fn default() -> Self { + Self { + enabled: true, + rules: Default::default(), + } + } +} +impl PartialPglinterConfiguration { + pub const fn is_disabled(&self) -> bool { + matches!(self.enabled, Some(false)) + } + pub fn get_rules(&self) -> Rules { + self.rules.clone().unwrap_or_default() + } +} diff --git a/crates/pgls_configuration/src/pglinter/rules.rs b/crates/pgls_configuration/src/pglinter/rules.rs new file mode 100644 index 000000000..b22721a80 --- /dev/null +++ b/crates/pgls_configuration/src/pglinter/rules.rs @@ -0,0 +1,916 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rules::{RuleConfiguration, RulePlainConfiguration}; +use biome_deserialize_macros::Merge; +use pgls_analyse::RuleFilter; +use pgls_analyser::RuleOptions; +use pgls_diagnostics::{Category, Severity}; +use rustc_hash::FxHashSet; +#[cfg(feature = "schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[derive( + Clone, + Copy, + Debug, + Eq, + Hash, + Merge, + Ord, + PartialEq, + PartialOrd, + serde :: Deserialize, + serde :: Serialize, +)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase")] +pub enum RuleGroup { + Base, + Cluster, + Schema, +} +impl RuleGroup { + pub const fn as_str(self) -> &'static str { + match self { + Self::Base => Base::GROUP_NAME, + Self::Cluster => Cluster::GROUP_NAME, + Self::Schema => Schema::GROUP_NAME, + } + } +} +impl std::str::FromStr for RuleGroup { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + Base::GROUP_NAME => Ok(Self::Base), + Cluster::GROUP_NAME => 
Ok(Self::Cluster), + Schema::GROUP_NAME => Ok(Self::Schema), + _ => Err("This rule group doesn't exist."), + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[cfg_attr(feature = "schema", schemars(rename = "PglinterRules"))] +#[serde(rename_all = "camelCase", deny_unknown_fields)] +pub struct Rules { + #[doc = r" It enables the lint rules recommended by Postgres Language Server. `true` by default."] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules. The rules that belong to `nursery` won't be enabled."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub cluster: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option, +} +impl Rules { + #[doc = r" Checks if the code coming from [pgls_diagnostics::Diagnostic] corresponds to a rule."] + #[doc = r" Usually the code is built like {group}/{rule_name}"] + pub fn has_rule(group: RuleGroup, rule_name: &str) -> Option<&'static str> { + match group { + RuleGroup::Base => Base::has_rule(rule_name), + RuleGroup::Cluster => Cluster::has_rule(rule_name), + RuleGroup::Schema => Schema::has_rule(rule_name), + } + } + #[doc = r" Given a category coming from [Diagnostic](pgls_diagnostics::Diagnostic), this function returns"] + #[doc = r" the [Severity](pgls_diagnostics::Severity) associated to the rule, if the configuration changed it."] + #[doc = r" If the severity is off or not set, then the function returns the default severity of the rule,"] + #[doc = r" which is configured at the rule definition."] + #[doc = r" The function can return `None` if the rule is not properly configured."] + pub fn get_severity_from_code(&self, category: &Category) -> Option { + let mut split_code = 
category.name().split('/'); + let _category_prefix = split_code.next(); + debug_assert_eq!(_category_prefix, Some("pglinter")); + let group = ::from_str(split_code.next()?).ok()?; + let rule_name = split_code.next()?; + let rule_name = Self::has_rule(group, rule_name)?; + let severity = match group { + RuleGroup::Base => self + .base + .as_ref() + .and_then(|group| group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Base::severity(rule_name), |(level, _)| level.into()), + RuleGroup::Cluster => self + .cluster + .as_ref() + .and_then(|group| group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Cluster::severity(rule_name), |(level, _)| level.into()), + RuleGroup::Schema => self + .schema + .as_ref() + .and_then(|group| group.get_rule_configuration(rule_name)) + .filter(|(level, _)| !matches!(level, RulePlainConfiguration::Off)) + .map_or_else(|| Schema::severity(rule_name), |(level, _)| level.into()), + }; + Some(severity) + } + #[doc = r" Ensure that `recommended` is set to `true` or implied."] + pub fn set_recommended(&mut self) { + if self.all != Some(true) && self.recommended == Some(false) { + self.recommended = Some(true) + } + if let Some(group) = &mut self.base { + group.recommended = None; + } + if let Some(group) = &mut self.cluster { + group.recommended = None; + } + if let Some(group) = &mut self.schema { + group.recommended = None; + } + } + pub(crate) const fn is_recommended_false(&self) -> bool { + matches!(self.recommended, Some(false)) + } + pub(crate) const fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + #[doc = r" It returns the enabled rules by default."] + #[doc = r""] + #[doc = r" The enabled rules are calculated from the difference with the disabled rules."] + pub fn as_enabled_rules(&self) -> FxHashSet> { + let mut enabled_rules = FxHashSet::default(); + let mut 
disabled_rules = FxHashSet::default(); + if let Some(group) = self.base.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + &mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Base::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Base::recommended_rules_as_filters()); + } + if let Some(group) = self.cluster.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + &mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Cluster::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Cluster::recommended_rules_as_filters()); + } + if let Some(group) = self.schema.as_ref() { + group.collect_preset_rules( + self.is_all_true(), + !self.is_recommended_false(), + &mut enabled_rules, + ); + enabled_rules.extend(&group.get_enabled_rules()); + disabled_rules.extend(&group.get_disabled_rules()); + } else if self.is_all_true() { + enabled_rules.extend(Schema::all_rules_as_filters()); + } else if !self.is_recommended_false() { + enabled_rules.extend(Schema::recommended_rules_as_filters()); + } + enabled_rules.difference(&disabled_rules).copied().collect() + } + #[doc = r" It returns the disabled rules by configuration."] + pub fn as_disabled_rules(&self) -> FxHashSet> { + let mut disabled_rules = FxHashSet::default(); + if let Some(group) = self.base.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + if let Some(group) = self.cluster.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + if let Some(group) = self.schema.as_ref() { + disabled_rules.extend(&group.get_disabled_rules()); + } + disabled_rules + } +} 
+#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that belong to this group"] +pub struct Base { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns"] + #[serde(skip_serializing_if = "Option::is_none")] + pub composite_primary_key_too_many_columns: Option>, + #[doc = "HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_objects_with_uppercase: Option>, + #[doc = "HowManyRedudantIndex (B002): Count number of redundant index vs nb index."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_redudant_index: Option>, + #[doc = "HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_table_without_index_on_fk: Option>, + #[doc = "HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_table_without_primary_key: Option>, + #[doc = "HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_never_selected: Option>, + #[doc = "HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_fk_mismatch: 
Option>, + #[doc = "HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_fk_outside_schema: Option>, + #[doc = "HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_reserved_keywords: Option>, + #[doc = "HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers."] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_tables_with_same_trigger: Option>, + #[doc = "HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)"] + #[serde(skip_serializing_if = "Option::is_none")] + pub how_many_unused_index: Option>, + #[doc = "SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners."] + #[serde(skip_serializing_if = "Option::is_none")] + pub several_table_owner_in_schema: Option>, +} +impl Base { + const GROUP_NAME: &'static str = "base"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "compositePrimaryKeyTooManyColumns", + "howManyObjectsWithUppercase", + "howManyRedudantIndex", + "howManyTableWithoutIndexOnFk", + "howManyTableWithoutPrimaryKey", + "howManyTablesNeverSelected", + "howManyTablesWithFkMismatch", + "howManyTablesWithFkOutsideSchema", + "howManyTablesWithReservedKeywords", + "howManyTablesWithSameTrigger", + "howManyUnusedIndex", + "severalTableOwnerInSchema", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, 
Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11]), + ]; + #[doc = r" Retrieves the recommended rules"] + pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.composite_primary_key_too_many_columns.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = 
self.how_many_objects_with_uppercase.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.how_many_redudant_index.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.how_many_table_without_index_on_fk.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.how_many_table_without_primary_key.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.how_many_tables_never_selected.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_mismatch.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_outside_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.how_many_tables_with_reserved_keywords.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.how_many_tables_with_same_trigger.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.how_many_unused_index.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.several_table_owner_in_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + index_set + } + 
pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.composite_primary_key_too_many_columns.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.how_many_objects_with_uppercase.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.how_many_redudant_index.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.how_many_table_without_index_on_fk.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.how_many_table_without_primary_key.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + if let Some(rule) = self.how_many_tables_never_selected.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[5])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_mismatch.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[6])); + } + } + if let Some(rule) = self.how_many_tables_with_fk_outside_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[7])); + } + } + if let Some(rule) = self.how_many_tables_with_reserved_keywords.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[8])); + } + } + if let Some(rule) = self.how_many_tables_with_same_trigger.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[9])); + } + } + if let Some(rule) = self.how_many_unused_index.as_ref() 
{ + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[10])); + } + } + if let Some(rule) = self.several_table_owner_in_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[11])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { + Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "compositePrimaryKeyTooManyColumns" => Severity::Warning, + "howManyObjectsWithUppercase" => Severity::Warning, + "howManyRedudantIndex" => Severity::Warning, + "howManyTableWithoutIndexOnFk" => Severity::Warning, + "howManyTableWithoutPrimaryKey" => Severity::Warning, + "howManyTablesNeverSelected" => Severity::Warning, + "howManyTablesWithFkMismatch" => Severity::Warning, + "howManyTablesWithFkOutsideSchema" => Severity::Warning, + "howManyTablesWithReservedKeywords" => Severity::Warning, + "howManyTablesWithSameTrigger" => Severity::Warning, + "howManyUnusedIndex" => Severity::Warning, + "severalTableOwnerInSchema" => 
Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "compositePrimaryKeyTooManyColumns" => self + .composite_primary_key_too_many_columns + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyObjectsWithUppercase" => self + .how_many_objects_with_uppercase + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyRedudantIndex" => self + .how_many_redudant_index + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTableWithoutIndexOnFk" => self + .how_many_table_without_index_on_fk + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTableWithoutPrimaryKey" => self + .how_many_table_without_primary_key + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesNeverSelected" => self + .how_many_tables_never_selected + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithFkMismatch" => self + .how_many_tables_with_fk_mismatch + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithFkOutsideSchema" => self + .how_many_tables_with_fk_outside_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithReservedKeywords" => self + .how_many_tables_with_reserved_keywords + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyTablesWithSameTrigger" => self + .how_many_tables_with_same_trigger + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "howManyUnusedIndex" => self + .how_many_unused_index + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "severalTableOwnerInSchema" => self + .several_table_owner_in_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", 
derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that belong to this group"] +pub struct Cluster { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256."] + #[serde(skip_serializing_if = "Option::is_none")] + pub password_encryption_is_md5: Option>, + #[doc = "PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."] + #[serde(skip_serializing_if = "Option::is_none")] + pub pg_hba_entries_with_method_trust_or_password_should_not_exists: + Option>, + #[doc = "PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."] + #[serde(skip_serializing_if = "Option::is_none")] + pub pg_hba_entries_with_method_trust_should_not_exists: Option>, +} +impl Cluster { + const GROUP_NAME: &'static str = "cluster"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "passwordEncryptionIsMd5", + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + "pgHbaEntriesWithMethodTrustShouldNotExists", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + ]; + #[doc = r" Retrieves the recommended rules"] + pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.password_encryption_is_md5.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self + 
.pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + index_set + } + pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.password_encryption_is_md5.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self + .pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { + Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "passwordEncryptionIsMd5" => 
Severity::Warning, + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Severity::Warning, + "pgHbaEntriesWithMethodTrustShouldNotExists" => Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "passwordEncryptionIsMd5" => self + .password_encryption_is_md5 + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => self + .pg_hba_entries_with_method_trust_or_password_should_not_exists + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "pgHbaEntriesWithMethodTrustShouldNotExists" => self + .pg_hba_entries_with_method_trust_should_not_exists + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[derive(Clone, Debug, Default, Deserialize, Eq, Merge, PartialEq, Serialize)] +#[cfg_attr(feature = "schema", derive(JsonSchema))] +#[serde(rename_all = "camelCase", default, deny_unknown_fields)] +#[doc = r" A list of rules that belong to this group"] +pub struct Schema { + #[doc = r" It enables the recommended rules for this group"] + #[serde(skip_serializing_if = "Option::is_none")] + pub recommended: Option, + #[doc = r" It enables ALL rules for this group."] + #[serde(skip_serializing_if = "Option::is_none")] + pub all: Option, + #[doc = "OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary)."] + #[serde(skip_serializing_if = "Option::is_none")] + pub owner_schema_is_internal_role: Option>, + #[doc = "SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_owner_do_not_match_table_owner: Option>, + #[doc = "SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. 
Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_prefixed_or_suffixed_with_envt: Option>, + #[doc = "SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it."] + #[serde(skip_serializing_if = "Option::is_none")] + pub schema_with_default_role_not_granted: Option>, + #[doc = "UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects."] + #[serde(skip_serializing_if = "Option::is_none")] + pub unsecured_public_schema: Option>, +} +impl Schema { + const GROUP_NAME: &'static str = "schema"; + pub(crate) const GROUP_RULES: &'static [&'static str] = &[ + "ownerSchemaIsInternalRole", + "schemaOwnerDoNotMatchTableOwner", + "schemaPrefixedOrSuffixedWithEnvt", + "schemaWithDefaultRoleNotGranted", + "unsecuredPublicSchema", + ]; + const RECOMMENDED_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + ]; + const ALL_RULES_AS_FILTERS: &'static [RuleFilter<'static>] = &[ + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3]), + RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4]), + ]; + #[doc = r" Retrieves the recommended rules"] + pub(crate) fn is_recommended_true(&self) -> bool { + matches!(self.recommended, Some(true)) + } + pub(crate) fn 
is_recommended_unset(&self) -> bool { + self.recommended.is_none() + } + pub(crate) fn is_all_true(&self) -> bool { + matches!(self.all, Some(true)) + } + pub(crate) fn is_all_unset(&self) -> bool { + self.all.is_none() + } + pub(crate) fn get_enabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.owner_schema_is_internal_role.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.schema_owner_do_not_match_table_owner.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.schema_prefixed_or_suffixed_with_envt.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.schema_with_default_role_not_granted.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.unsecured_public_schema.as_ref() { + if rule.is_enabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + index_set + } + pub(crate) fn get_disabled_rules(&self) -> FxHashSet> { + let mut index_set = FxHashSet::default(); + if let Some(rule) = self.owner_schema_is_internal_role.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[0])); + } + } + if let Some(rule) = self.schema_owner_do_not_match_table_owner.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[1])); + } + } + if let Some(rule) = self.schema_prefixed_or_suffixed_with_envt.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[2])); + } + } + if let Some(rule) = self.schema_with_default_role_not_granted.as_ref() { + if 
rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[3])); + } + } + if let Some(rule) = self.unsecured_public_schema.as_ref() { + if rule.is_disabled() { + index_set.insert(RuleFilter::Rule(Self::GROUP_NAME, Self::GROUP_RULES[4])); + } + } + index_set + } + #[doc = r" Checks if, given a rule name, matches one of the rules contained in this category"] + pub(crate) fn has_rule(rule_name: &str) -> Option<&'static str> { + Some(Self::GROUP_RULES[Self::GROUP_RULES.binary_search(&rule_name).ok()?]) + } + pub(crate) fn recommended_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::RECOMMENDED_RULES_AS_FILTERS + } + pub(crate) fn all_rules_as_filters() -> &'static [RuleFilter<'static>] { + Self::ALL_RULES_AS_FILTERS + } + #[doc = r" Select preset rules"] + pub(crate) fn collect_preset_rules( + &self, + parent_is_all: bool, + parent_is_recommended: bool, + enabled_rules: &mut FxHashSet>, + ) { + if self.is_all_true() || self.is_all_unset() && parent_is_all { + enabled_rules.extend(Self::all_rules_as_filters()); + } else if self.is_recommended_true() + || self.is_recommended_unset() && self.is_all_unset() && parent_is_recommended + { + enabled_rules.extend(Self::recommended_rules_as_filters()); + } + } + pub(crate) fn severity(rule_name: &str) -> Severity { + match rule_name { + "ownerSchemaIsInternalRole" => Severity::Warning, + "schemaOwnerDoNotMatchTableOwner" => Severity::Warning, + "schemaPrefixedOrSuffixedWithEnvt" => Severity::Warning, + "schemaWithDefaultRoleNotGranted" => Severity::Warning, + "unsecuredPublicSchema" => Severity::Warning, + _ => unreachable!(), + } + } + pub(crate) fn get_rule_configuration( + &self, + rule_name: &str, + ) -> Option<(RulePlainConfiguration, Option)> { + match rule_name { + "ownerSchemaIsInternalRole" => self + .owner_schema_is_internal_role + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaOwnerDoNotMatchTableOwner" => self + 
.schema_owner_do_not_match_table_owner + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaPrefixedOrSuffixedWithEnvt" => self + .schema_prefixed_or_suffixed_with_envt + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "schemaWithDefaultRoleNotGranted" => self + .schema_with_default_role_not_granted + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + "unsecuredPublicSchema" => self + .unsecured_public_schema + .as_ref() + .map(|conf| (conf.level(), conf.get_options())), + _ => None, + } + } +} +#[doc = r" Push the configured rules to the analyser"] +pub fn push_to_analyser_rules( + rules: &Rules, + metadata: &pgls_analyse::MetadataRegistry, + analyser_rules: &mut pgls_analyser::LinterRules, +) { + if let Some(rules) = rules.base.as_ref() { + for rule_name in Base::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("base", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } + if let Some(rules) = rules.cluster.as_ref() { + for rule_name in Cluster::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("cluster", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } + if let Some(rules) = rules.schema.as_ref() { + for rule_name in Schema::GROUP_RULES { + if let Some((_, Some(rule_options))) = rules.get_rule_configuration(rule_name) { + if let Some(rule_key) = metadata.find_rule("schema", rule_name) { + analyser_rules.push_rule(rule_key, rule_options); + } + } + } + } +} +#[test] +fn test_order() { + for items in Base::GROUP_RULES.windows(2) { + assert!(items[0] < items[1], "{} < {}", items[0], items[1]); + } + for items in Cluster::GROUP_RULES.windows(2) { + assert!(items[0] < items[1], "{} < {}", items[0], items[1]); + } + for items in Schema::GROUP_RULES.windows(2) { + assert!(items[0] < 
items[1], "{} < {}", items[0], items[1]); + } +} diff --git a/crates/pgls_configuration/src/rules/selector.rs b/crates/pgls_configuration/src/rules/selector.rs index 4627e2388..e206fff25 100644 --- a/crates/pgls_configuration/src/rules/selector.rs +++ b/crates/pgls_configuration/src/rules/selector.rs @@ -2,11 +2,12 @@ use pgls_analyse::RuleFilter; use std::str::FromStr; -/// Represents a rule group from any analyzer (linter or splinter) +/// Represents a rule group from any analyzer (linter, splinter, or pglinter) #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub enum AnalyzerGroup { Linter(crate::linter::RuleGroup), Splinter(crate::splinter::RuleGroup), + PgLinter(crate::pglinter::RuleGroup), } impl AnalyzerGroup { @@ -14,6 +15,7 @@ impl AnalyzerGroup { match self { Self::Linter(group) => group.as_str(), Self::Splinter(group) => group.as_str(), + Self::PgLinter(group) => group.as_str(), } } @@ -21,6 +23,7 @@ impl AnalyzerGroup { match self { Self::Linter(_) => "lint", Self::Splinter(_) => "splinter", + Self::PgLinter(_) => "pglinter", } } } @@ -57,6 +60,8 @@ impl FromStr for RuleSelector { ("lint", rest) } else if let Some(rest) = selector.strip_prefix("splinter/") { ("splinter", rest) + } else if let Some(rest) = selector.strip_prefix("pglinter/") { + ("pglinter", rest) } else { // Default to lint for backward compatibility ("lint", selector) @@ -84,6 +89,17 @@ impl FromStr for RuleSelector { Err("This rule doesn't exist.") } } + "pglinter" => { + let group = crate::pglinter::RuleGroup::from_str(group_name)?; + if let Some(rule_name) = crate::pglinter::Rules::has_rule(group, rule_name) { + Ok(RuleSelector::Rule( + AnalyzerGroup::PgLinter(group), + rule_name, + )) + } else { + Err("This rule doesn't exist.") + } + } _ => Err("Unknown analyzer type."), } } else { @@ -101,6 +117,12 @@ impl FromStr for RuleSelector { "This group doesn't exist. 
Use the syntax `/` to specify a rule.", ), }, + "pglinter" => match crate::pglinter::RuleGroup::from_str(rest) { + Ok(group) => Ok(RuleSelector::Group(AnalyzerGroup::PgLinter(group))), + Err(_) => Err( + "This group doesn't exist. Use the syntax `/` to specify a rule.", + ), + }, _ => Err("Unknown analyzer type."), } } diff --git a/crates/pgls_diagnostics_categories/src/categories.rs b/crates/pgls_diagnostics_categories/src/categories.rs index eb9e323ac..2b27f997f 100644 --- a/crates/pgls_diagnostics_categories/src/categories.rs +++ b/crates/pgls_diagnostics_categories/src/categories.rs @@ -47,6 +47,35 @@ define_categories! { "lint/safety/runningStatementWhileHoldingAccessExclusive": "https://pg-language-server.com/latest/reference/rules/running-statement-while-holding-access-exclusive/", "lint/safety/transactionNesting": "https://pg-language-server.com/latest/reference/rules/transaction-nesting/", // end lint rules + // pglinter rules start + // Meta diagnostics + "pglinter/extensionNotInstalled": "Install the pglinter extension with: CREATE EXTENSION pglinter", + "pglinter/ruleDisabledInExtension": "Enable the rule in the extension with: UPDATE pglinter.rules SET enable = true WHERE code = ''", + // Base rules (B-series) + "pglinter/base/compositePrimaryKeyTooManyColumns": "https://github.com/pmpetit/pglinter#b012", + "pglinter/base/howManyObjectsWithUppercase": "https://github.com/pmpetit/pglinter#b005", + "pglinter/base/howManyRedudantIndex": "https://github.com/pmpetit/pglinter#b002", + "pglinter/base/howManyTableWithoutIndexOnFk": "https://github.com/pmpetit/pglinter#b003", + "pglinter/base/howManyTableWithoutPrimaryKey": "https://github.com/pmpetit/pglinter#b001", + "pglinter/base/howManyTablesNeverSelected": "https://github.com/pmpetit/pglinter#b006", + "pglinter/base/howManyTablesWithFkMismatch": "https://github.com/pmpetit/pglinter#b008", + "pglinter/base/howManyTablesWithFkOutsideSchema": "https://github.com/pmpetit/pglinter#b007", + 
"pglinter/base/howManyTablesWithReservedKeywords": "https://github.com/pmpetit/pglinter#b010", + "pglinter/base/howManyTablesWithSameTrigger": "https://github.com/pmpetit/pglinter#b009", + "pglinter/base/howManyUnusedIndex": "https://github.com/pmpetit/pglinter#b004", + "pglinter/base/severalTableOwnerInSchema": "https://github.com/pmpetit/pglinter#b011", + // Cluster rules (C-series) + "pglinter/cluster/passwordEncryptionIsMd5": "https://github.com/pmpetit/pglinter#c003", + "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists": "https://github.com/pmpetit/pglinter#c002", + "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists": "https://github.com/pmpetit/pglinter#c001", + // Schema rules (S-series) + "pglinter/schema/ownerSchemaIsInternalRole": "https://github.com/pmpetit/pglinter#s004", + "pglinter/schema/schemaOwnerDoNotMatchTableOwner": "https://github.com/pmpetit/pglinter#s005", + "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt": "https://github.com/pmpetit/pglinter#s002", + "pglinter/schema/schemaWithDefaultRoleNotGranted": "https://github.com/pmpetit/pglinter#s001", + "pglinter/schema/unsecuredPublicSchema": "https://github.com/pmpetit/pglinter#s003", + // pglinter rules end + // splinter rules start "splinter/performance/authRlsInitplan": "https://supabase.com/docs/guides/database/database-linter?lint=0003_auth_rls_initplan", "splinter/performance/duplicateIndex": "https://supabase.com/docs/guides/database/database-linter?lint=0009_duplicate_index", @@ -98,4 +127,11 @@ define_categories! 
{ "splinter/performance", "splinter/security", // Splinter groups end + + // Pglinter groups start + "pglinter", + "pglinter/base", + "pglinter/cluster", + "pglinter/schema", + // Pglinter groups end } diff --git a/crates/pgls_pglinter/Cargo.toml b/crates/pgls_pglinter/Cargo.toml new file mode 100644 index 000000000..983e00a8f --- /dev/null +++ b/crates/pgls_pglinter/Cargo.toml @@ -0,0 +1,29 @@ +[package] +authors.workspace = true +categories.workspace = true +description = "pglinter Postgres extension integration for database linting" +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "pgls_pglinter" +repository.workspace = true +version = "0.0.0" + +[dependencies] +pgls_analyse.workspace = true +pgls_diagnostics.workspace = true +pgls_diagnostics_categories.workspace = true +pgls_schema_cache.workspace = true +rustc-hash.workspace = true +serde.workspace = true +serde_json.workspace = true +sqlx.workspace = true + +[dev-dependencies] +insta.workspace = true +pgls_console.workspace = true +pgls_test_utils.workspace = true + +[lib] +doctest = false diff --git a/crates/pgls_pglinter/src/cache.rs b/crates/pgls_pglinter/src/cache.rs new file mode 100644 index 000000000..015fd73f2 --- /dev/null +++ b/crates/pgls_pglinter/src/cache.rs @@ -0,0 +1,58 @@ +//! 
Pglinter extension cache for avoiding repeated database queries + +use pgls_schema_cache::SchemaCache; +use rustc_hash::FxHashSet; +use sqlx::PgPool; + +/// Cached pglinter extension state (loaded once, reused) +#[derive(Debug, Clone, Default)] +pub struct PglinterCache { + /// Whether the pglinter extension is installed + pub extension_installed: bool, + /// Rule codes that are disabled in the pglinter extension + pub disabled_rules: FxHashSet, +} + +impl PglinterCache { + /// Load pglinter extension state from database using official API + pub async fn load(conn: &PgPool, schema_cache: &SchemaCache) -> Result { + let extension_installed = schema_cache.extensions.iter().any(|e| e.name == "pglinter"); + + if !extension_installed { + return Ok(Self { + extension_installed: false, + disabled_rules: FxHashSet::default(), + }); + } + + // Get disabled rules using pglinter.show_rules() - single query + let disabled_rules = get_disabled_rules(conn).await?; + + Ok(Self { + extension_installed, + disabled_rules, + }) + } + + /// Create initial cache from schema cache only (disabled rules will need API call later) + pub fn from_schema_cache(schema_cache: &SchemaCache) -> Self { + Self { + extension_installed: schema_cache.extensions.iter().any(|e| e.name == "pglinter"), + disabled_rules: FxHashSet::default(), + } + } +} + +/// Get disabled rules using pglinter's official API: pglinter.show_rules() +pub async fn get_disabled_rules(conn: &PgPool) -> Result, sqlx::Error> { + let rows: Vec<(String, bool)> = + sqlx::query_as("SELECT rule_code, enabled FROM pglinter.show_rules()") + .fetch_all(conn) + .await?; + + Ok(rows + .into_iter() + .filter(|(_, enabled)| !enabled) + .map(|(code, _)| code) + .collect()) +} diff --git a/crates/pgls_pglinter/src/diagnostics.rs b/crates/pgls_pglinter/src/diagnostics.rs new file mode 100644 index 000000000..ad19c81fc --- /dev/null +++ b/crates/pgls_pglinter/src/diagnostics.rs @@ -0,0 +1,183 @@ +//! 
Pglinter diagnostic types and conversion from SARIF + +use pgls_diagnostics::{ + Advices, Category, DatabaseObjectOwned, Diagnostic, LogCategory, MessageAndDescription, + Severity, Visit, +}; +use std::io; + +use crate::sarif; + +/// A specialized diagnostic for pglinter (database-level linting via pglinter extension). +#[derive(Debug, Diagnostic, PartialEq)] +pub struct PglinterDiagnostic { + #[category] + pub category: &'static Category, + + #[location(database_object)] + pub db_object: Option, + + #[message] + #[description] + pub message: MessageAndDescription, + + #[severity] + pub severity: Severity, + + #[advice] + pub advices: PglinterAdvices, +} + +/// Advices for pglinter diagnostics +#[derive(Debug, PartialEq)] +pub struct PglinterAdvices { + /// General description of what this rule detects + pub description: String, + + /// Rule code (e.g., "B001", "S001", "C001") + pub rule_code: Option, + + /// Suggested fixes for the issue + pub fixes: Vec, + + /// List of affected database objects + pub object_list: Option, +} + +impl Advices for PglinterAdvices { + fn record(&self, visitor: &mut dyn Visit) -> io::Result<()> { + if !self.description.is_empty() { + visitor.record_log(LogCategory::None, &self.description)?; + } + + if let Some(code) = &self.rule_code { + visitor.record_log(LogCategory::Info, &format!("Rule: {code}"))?; + } + + if let Some(objects) = &self.object_list { + if !objects.is_empty() { + visitor.record_log(LogCategory::None, &"Affected objects:")?; + for line in objects.lines() { + visitor.record_log(LogCategory::Info, &format!(" {line}"))?; + } + } + } + + if !self.fixes.is_empty() { + visitor.record_log(LogCategory::None, &"How to fix:")?; + for (i, fix) in self.fixes.iter().enumerate() { + let num = i + 1; + visitor.record_log(LogCategory::Info, &format!(" {num}. 
{fix}"))?; + } + } + + Ok(()) + } +} + +/// Error when converting SARIF to diagnostics +#[derive(Debug)] +pub struct UnknownRuleError { + pub rule_code: String, +} + +impl std::fmt::Display for UnknownRuleError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Unknown pglinter rule code: {}", self.rule_code) + } +} + +impl std::error::Error for UnknownRuleError {} + +impl PglinterDiagnostic { + /// Try to convert a single SARIF result to a pglinter diagnostic + pub fn try_from_sarif( + result: &sarif::Result, + rule_code: &str, + ) -> Result { + let category = + crate::registry::get_rule_category(rule_code).ok_or_else(|| UnknownRuleError { + rule_code: rule_code.to_string(), + })?; + + let metadata = crate::registry::get_rule_metadata_by_code(rule_code); + + let severity = match result.level_str() { + "error" => Severity::Error, + "warning" => Severity::Warning, + "note" => Severity::Information, + _ => Severity::Warning, + }; + + let message = result.message_text().to_string(); + let description = metadata + .map(|m| m.description.to_string()) + .unwrap_or_else(|| message.clone()); + + let fixes = metadata + .map(|m| m.fixes.iter().map(|s| s.to_string()).collect()) + .unwrap_or_default(); + + let object_list = { + let names = result.logical_location_names(); + if names.is_empty() { + None + } else { + Some(names.join("\n")) + } + }; + + Ok(PglinterDiagnostic { + category, + db_object: None, + message: message.into(), + severity, + advices: PglinterAdvices { + description, + rule_code: Some(rule_code.to_string()), + fixes, + object_list, + }, + }) + } + + /// Create diagnostic for missing pglinter extension + pub fn extension_not_installed() -> PglinterDiagnostic { + PglinterDiagnostic { + category: pgls_diagnostics::category!("pglinter/extensionNotInstalled"), + db_object: None, + message: "The pglinter extension is not installed in the database. 
Install it with 'CREATE EXTENSION pglinter' or disable pglinter rules in your configuration.".into(), + severity: Severity::Error, + advices: PglinterAdvices { + description: "pglinter rules are enabled in your configuration but the extension is not installed.".to_string(), + rule_code: None, + fixes: vec!["Install the pglinter extension: CREATE EXTENSION pglinter".to_string()], + object_list: None, + }, + } + } + + /// Create diagnostic for rule disabled in pglinter extension + pub fn rule_disabled_in_extension(rule_code: &str) -> PglinterDiagnostic { + let description = format!( + "Rule {rule_code} is enabled in configuration but disabled in pglinter extension. Enable it with: SELECT pglinter.enable_rule('{rule_code}')" + ); + + PglinterDiagnostic { + category: pgls_diagnostics::category!("pglinter/ruleDisabledInExtension"), + db_object: None, + message: description.into(), + severity: Severity::Error, + advices: PglinterAdvices { + description: format!( + "Rule {rule_code} is configured to run but is disabled in the pglinter extension." + ), + rule_code: Some(rule_code.to_string()), + fixes: vec![format!( + "Enable the rule: SELECT pglinter.enable_rule('{rule_code}')" + )], + object_list: None, + }, + } + } +} diff --git a/crates/pgls_pglinter/src/lib.rs b/crates/pgls_pglinter/src/lib.rs new file mode 100644 index 000000000..8506a70e2 --- /dev/null +++ b/crates/pgls_pglinter/src/lib.rs @@ -0,0 +1,157 @@ +//! 
pglinter Postgres extension integration for database linting + +mod cache; +mod diagnostics; +pub mod registry; +pub mod rule; +pub mod rules; +pub mod sarif; + +use pgls_analyse::{AnalysisFilter, RegistryVisitor, RuleMeta}; +use pgls_schema_cache::SchemaCache; +use sqlx::PgPool; + +pub use cache::PglinterCache; +pub use diagnostics::{PglinterAdvices, PglinterDiagnostic}; +pub use rule::PglinterRule; +pub use sarif::SarifLog; + +/// Parameters for running pglinter +#[derive(Debug)] +pub struct PglinterParams<'a> { + pub conn: &'a PgPool, + pub schema_cache: &'a SchemaCache, +} + +/// Visitor that collects enabled pglinter rules based on filter +struct RuleCollector<'a> { + filter: &'a AnalysisFilter<'a>, + enabled_rules: Vec, +} + +impl<'a> RegistryVisitor for RuleCollector<'a> { + fn record_category(&mut self) { + if self.filter.match_category::() { + C::record_groups(self); + } + } + + fn record_group(&mut self) { + if self.filter.match_group::() { + G::record_rules(self); + } + } + + fn record_rule(&mut self) { + if self.filter.match_rule::() { + if let Some(code) = registry::get_rule_code(R::METADATA.name) { + self.enabled_rules.push(code.to_string()); + } + } + } +} + +fn collect_enabled_rules(filter: &AnalysisFilter<'_>) -> Vec { + let mut collector = RuleCollector { + filter, + enabled_rules: Vec::new(), + }; + registry::visit_registry(&mut collector); + collector.enabled_rules +} + +/// Run pglinter rules against the database +pub async fn run_pglinter( + params: PglinterParams<'_>, + filter: &AnalysisFilter<'_>, + cache: Option<&PglinterCache>, +) -> Result, sqlx::Error> { + let mut results = vec![]; + + // Check extension installed + let extension_installed = cache.map(|c| c.extension_installed).unwrap_or_else(|| { + params + .schema_cache + .extensions + .iter() + .any(|e| e.name == "pglinter") + }); + + // Collect enabled rules from config + let enabled_rules = collect_enabled_rules(filter); + + if !extension_installed { + if !enabled_rules.is_empty() { 
+ results.push(PglinterDiagnostic::extension_not_installed()); + } + return Ok(results); + } + + if enabled_rules.is_empty() { + return Ok(results); + } + + // Get disabled rules from extension + let disabled_in_extension = match cache { + Some(c) => c.disabled_rules.clone(), + None => cache::get_disabled_rules(params.conn).await?, + }; + + // Check for mismatches and collect runnable rules + let mut runnable_rules = Vec::new(); + for rule_code in &enabled_rules { + if disabled_in_extension.contains(rule_code) { + results.push(PglinterDiagnostic::rule_disabled_in_extension(rule_code)); + } else { + runnable_rules.push(rule_code.clone()); + } + } + + if runnable_rules.is_empty() { + return Ok(results); + } + + // Execute each rule + for rule_code in &runnable_rules { + if let Some(diags) = execute_rule(params.conn, rule_code).await? { + results.extend(diags); + } + } + + Ok(results) +} + +/// Execute a single pglinter rule using pglinter.check_rule() +async fn execute_rule( + conn: &PgPool, + rule_code: &str, +) -> Result>, sqlx::Error> { + let result: Option = sqlx::query_scalar("SELECT pglinter.check_rule($1)") + .bind(rule_code) + .fetch_optional(conn) + .await?; + + let Some(sarif_json) = result else { + return Ok(None); + }; + + let sarif = match SarifLog::parse(&sarif_json) { + Ok(s) => s, + Err(_) => return Ok(None), + }; + + if !sarif.has_results() { + return Ok(None); + } + + let diags: Vec<_> = sarif + .all_results() + .filter_map(|result| PglinterDiagnostic::try_from_sarif(result, rule_code).ok()) + .collect(); + + if diags.is_empty() { + Ok(None) + } else { + Ok(Some(diags)) + } +} diff --git a/crates/pgls_pglinter/src/registry.rs b/crates/pgls_pglinter/src/registry.rs new file mode 100644 index 000000000..1ec8516b6 --- /dev/null +++ b/crates/pgls_pglinter/src/registry.rs @@ -0,0 +1,463 @@ +//! 
Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use pgls_analyse::RegistryVisitor; +use pgls_diagnostics::Category; +#[doc = r" Metadata for a pglinter rule"] +#[derive(Debug, Clone, Copy)] +pub struct RuleMetadata { + #[doc = r#" Rule code (e.g., "B001")"#] + pub code: &'static str, + #[doc = r" Rule name in camelCase"] + pub name: &'static str, + #[doc = r" Rule scope (BASE, SCHEMA, CLUSTER)"] + pub scope: &'static str, + #[doc = r" Description of what the rule detects"] + pub description: &'static str, + #[doc = r" Suggested fixes"] + pub fixes: &'static [&'static str], +} +#[doc = r" Visit all pglinter rules using the visitor pattern"] +pub fn visit_registry(registry: &mut V) { + registry.record_category::(); +} +#[doc = r" Get the pglinter rule code from the camelCase name"] +pub fn get_rule_code(name: &str) -> Option<&'static str> { + match name { + "compositePrimaryKeyTooManyColumns" => Some("B012"), + "howManyObjectsWithUppercase" => Some("B005"), + "howManyRedudantIndex" => Some("B002"), + "howManyTableWithoutIndexOnFk" => Some("B003"), + "howManyTableWithoutPrimaryKey" => Some("B001"), + "howManyTablesNeverSelected" => Some("B006"), + "howManyTablesWithFkMismatch" => Some("B008"), + "howManyTablesWithFkOutsideSchema" => Some("B007"), + "howManyTablesWithReservedKeywords" => Some("B010"), + "howManyTablesWithSameTrigger" => Some("B009"), + "howManyUnusedIndex" => Some("B004"), + "ownerSchemaIsInternalRole" => Some("S004"), + "passwordEncryptionIsMd5" => Some("C003"), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Some("C002"), + "pgHbaEntriesWithMethodTrustShouldNotExists" => Some("C001"), + "schemaOwnerDoNotMatchTableOwner" => Some("S005"), + "schemaPrefixedOrSuffixedWithEnvt" => Some("S002"), + "schemaWithDefaultRoleNotGranted" => Some("S001"), + "severalTableOwnerInSchema" => Some("B011"), + "unsecuredPublicSchema" => Some("S003"), + _ => None, + } +} +#[doc 
= r" Get the diagnostic category for a rule code"] +pub fn get_rule_category(code: &str) -> Option<&'static Category> { + match code { + "B012" => Some(::pgls_diagnostics::category!( + "pglinter/base/compositePrimaryKeyTooManyColumns" + )), + "B005" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyObjectsWithUppercase" + )), + "B002" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyRedudantIndex" + )), + "B003" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTableWithoutIndexOnFk" + )), + "B001" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTableWithoutPrimaryKey" + )), + "B006" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesNeverSelected" + )), + "B008" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithFkMismatch" + )), + "B007" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithFkOutsideSchema" + )), + "B010" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithReservedKeywords" + )), + "B009" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyTablesWithSameTrigger" + )), + "B004" => Some(::pgls_diagnostics::category!( + "pglinter/base/howManyUnusedIndex" + )), + "S004" => Some(::pgls_diagnostics::category!( + "pglinter/schema/ownerSchemaIsInternalRole" + )), + "C003" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/passwordEncryptionIsMd5" + )), + "C002" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + )), + "C001" => Some(::pgls_diagnostics::category!( + "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + )), + "S005" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + )), + "S002" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + )), + "S001" => Some(::pgls_diagnostics::category!( + "pglinter/schema/schemaWithDefaultRoleNotGranted" + 
)), + "B011" => Some(::pgls_diagnostics::category!( + "pglinter/base/severalTableOwnerInSchema" + )), + "S003" => Some(::pgls_diagnostics::category!( + "pglinter/schema/unsecuredPublicSchema" + )), + _ => None, + } +} +#[doc = r" Get rule metadata by name (camelCase)"] +pub fn get_rule_metadata(name: &str) -> Option { + match name { + "compositePrimaryKeyTooManyColumns" => Some(RuleMetadata { + code: "B012", + name: "compositePrimaryKeyTooManyColumns", + scope: "BASE", + description: "Detect tables with composite primary keys involving more than 4 columns", + fixes: &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ], + }), + "howManyObjectsWithUppercase" => Some(RuleMetadata { + code: "B005", + name: "howManyObjectsWithUppercase", + scope: "BASE", + description: "Count number of objects with uppercase in name or in columns.", + fixes: &["Do not use uppercase for any database objects"], + }), + "howManyRedudantIndex" => Some(RuleMetadata { + code: "B002", + name: "howManyRedudantIndex", + scope: "BASE", + description: "Count number of redundant index vs nb index.", + fixes: &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ], + }), + "howManyTableWithoutIndexOnFk" => Some(RuleMetadata { + code: "B003", + name: "howManyTableWithoutIndexOnFk", + scope: "BASE", + description: "Count number of tables without index on foreign key.", + fixes: &["create a index on foreign key or change warning/error threshold"], + }), + "howManyTableWithoutPrimaryKey" => Some(RuleMetadata { + code: "B001", + name: "howManyTableWithoutPrimaryKey", + scope: "BASE", + description: "Count number of tables without primary key.", + fixes: &["create a primary key or change warning/error threshold"], + }), 
+ "howManyTablesNeverSelected" => Some(RuleMetadata { + code: "B006", + name: "howManyTablesNeverSelected", + scope: "BASE", + description: "Count number of table(s) that has never been selected.", + fixes: &[ + "Is it necessary to update/delete/insert rows in table(s) that are never selected ?", + ], + }), + "howManyTablesWithFkMismatch" => Some(RuleMetadata { + code: "B008", + name: "howManyTablesWithFkMismatch", + scope: "BASE", + description: "Count number of tables with foreign keys that do not match the key reference type.", + fixes: &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ], + }), + "howManyTablesWithFkOutsideSchema" => Some(RuleMetadata { + code: "B007", + name: "howManyTablesWithFkOutsideSchema", + scope: "BASE", + description: "Count number of tables with foreign keys outside their schema.", + fixes: &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ], + }), + "howManyTablesWithReservedKeywords" => Some(RuleMetadata { + code: "B010", + name: "howManyTablesWithReservedKeywords", + scope: "BASE", + description: "Count number of database objects using reserved keywords in their names.", + fixes: &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ], + }), + "howManyTablesWithSameTrigger" => Some(RuleMetadata { + code: "B009", + name: "howManyTablesWithSameTrigger", + scope: "BASE", + description: "Count number of tables using the same trigger vs nb table with their own triggers.", + fixes: &[ + "For more readability and other considerations use one trigger function per table.", + "Sharing the same trigger function add more complexity.", + ], + }), + "howManyUnusedIndex" => Some(RuleMetadata { + code: "B004", + name: "howManyUnusedIndex", + scope: "BASE", + description: "Count number of unused index vs nb index (base on 
pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + fixes: &["remove unused index or change warning/error threshold"], + }), + "ownerSchemaIsInternalRole" => Some(RuleMetadata { + code: "S004", + name: "ownerSchemaIsInternalRole", + scope: "SCHEMA", + description: "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", + fixes: &["change schema owner to a functional role"], + }), + "passwordEncryptionIsMd5" => Some(RuleMetadata { + code: "C003", + name: "passwordEncryptionIsMd5", + scope: "CLUSTER", + description: "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + fixes: &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ], + }), + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" => Some(RuleMetadata { + code: "C002", + name: "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust or password method in pg_hba.conf"], + }), + "pgHbaEntriesWithMethodTrustShouldNotExists" => Some(RuleMetadata { + code: "C001", + name: "pgHbaEntriesWithMethodTrustShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust method in pg_hba.conf"], + }), + "schemaOwnerDoNotMatchTableOwner" => Some(RuleMetadata { + code: "S005", + name: "schemaOwnerDoNotMatchTableOwner", + scope: "SCHEMA", + description: "The schema owner and tables in the schema do not match.", + fixes: &["For maintenance facilities, schema and tables owners should be the same."], + }), + "schemaPrefixedOrSuffixedWithEnvt" => Some(RuleMetadata { + code: "S002", + name: "schemaPrefixedOrSuffixedWithEnvt", + scope: "SCHEMA", + description: "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", + fixes: &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ], + }), + "schemaWithDefaultRoleNotGranted" => Some(RuleMetadata { + code: "S001", + name: "schemaWithDefaultRoleNotGranted", + scope: "SCHEMA", + description: "The schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.", + fixes: &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ], + }), + "severalTableOwnerInSchema" => Some(RuleMetadata { + code: "B011", + name: "severalTableOwnerInSchema", + scope: "BASE", + description: "In a schema there are several tables owned by different owners.", + fixes: &["change table owners to the same functional role"], + }), + "unsecuredPublicSchema" => Some(RuleMetadata { + code: "S003", + name: "unsecuredPublicSchema", + scope: "SCHEMA", + description: "Only authorized users should be allowed to create objects.", + fixes: &["REVOKE CREATE ON SCHEMA FROM PUBLIC"], + }), + _ => None, + } +} +#[doc = r#" Get rule metadata by code (e.g., "B001", "S001", "C001")"#] +pub fn get_rule_metadata_by_code(code: &str) -> Option { + match code { + "B012" => Some(RuleMetadata { + code: "B012", + name: "compositePrimaryKeyTooManyColumns", + scope: "BASE", + description: "Detect tables with composite primary keys involving more than 4 columns", + fixes: &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ], + }), + "B005" => Some(RuleMetadata { + code: "B005", + name: "howManyObjectsWithUppercase", + scope: "BASE", + description: "Count number of objects with uppercase in name or in columns.", + fixes: &["Do not use uppercase for any database objects"], + }), + "B002" => Some(RuleMetadata { + code: "B002", + name: "howManyRedudantIndex", + scope: "BASE", + description: "Count number of redundant index vs nb index.", + fixes: &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ], + }), + "B003" => Some(RuleMetadata { + code: "B003", + name: "howManyTableWithoutIndexOnFk", + scope: "BASE", + description: 
"Count number of tables without index on foreign key.", + fixes: &["create a index on foreign key or change warning/error threshold"], + }), + "B001" => Some(RuleMetadata { + code: "B001", + name: "howManyTableWithoutPrimaryKey", + scope: "BASE", + description: "Count number of tables without primary key.", + fixes: &["create a primary key or change warning/error threshold"], + }), + "B006" => Some(RuleMetadata { + code: "B006", + name: "howManyTablesNeverSelected", + scope: "BASE", + description: "Count number of table(s) that has never been selected.", + fixes: &[ + "Is it necessary to update/delete/insert rows in table(s) that are never selected ?", + ], + }), + "B008" => Some(RuleMetadata { + code: "B008", + name: "howManyTablesWithFkMismatch", + scope: "BASE", + description: "Count number of tables with foreign keys that do not match the key reference type.", + fixes: &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ], + }), + "B007" => Some(RuleMetadata { + code: "B007", + name: "howManyTablesWithFkOutsideSchema", + scope: "BASE", + description: "Count number of tables with foreign keys outside their schema.", + fixes: &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ], + }), + "B010" => Some(RuleMetadata { + code: "B010", + name: "howManyTablesWithReservedKeywords", + scope: "BASE", + description: "Count number of database objects using reserved keywords in their names.", + fixes: &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ], + }), + "B009" => Some(RuleMetadata { + code: "B009", + name: "howManyTablesWithSameTrigger", + scope: "BASE", + description: "Count number of tables using the same trigger vs nb table with their own triggers.", + fixes: &[ + "For more readability and other considerations use one trigger function per table.", + 
"Sharing the same trigger function add more complexity.", + ], + }), + "B004" => Some(RuleMetadata { + code: "B004", + name: "howManyUnusedIndex", + scope: "BASE", + description: "Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + fixes: &["remove unused index or change warning/error threshold"], + }), + "S004" => Some(RuleMetadata { + code: "S004", + name: "ownerSchemaIsInternalRole", + scope: "SCHEMA", + description: "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", + fixes: &["change schema owner to a functional role"], + }), + "C003" => Some(RuleMetadata { + code: "C003", + name: "passwordEncryptionIsMd5", + scope: "CLUSTER", + description: "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + fixes: &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ], + }), + "C002" => Some(RuleMetadata { + code: "C002", + name: "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust or password method in pg_hba.conf"], + }), + "C001" => Some(RuleMetadata { + code: "C001", + name: "pgHbaEntriesWithMethodTrustShouldNotExists", + scope: "CLUSTER", + description: "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + fixes: &["change trust method in pg_hba.conf"], + }), + "S005" => Some(RuleMetadata { + code: "S005", + name: "schemaOwnerDoNotMatchTableOwner", + scope: "SCHEMA", + description: "The schema owner and tables in the schema do not match.", + fixes: &["For maintenance facilities, schema and tables owners should be the same."], + }), + "S002" => Some(RuleMetadata { + code: "S002", + name: "schemaPrefixedOrSuffixedWithEnvt", + scope: "SCHEMA", + description: "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", + fixes: &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ], + }), + "S001" => Some(RuleMetadata { + code: "S001", + name: "schemaWithDefaultRoleNotGranted", + scope: "SCHEMA", + description: "The schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.", + fixes: &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ], + }), + "B011" => Some(RuleMetadata { + code: "B011", + name: "severalTableOwnerInSchema", + scope: "BASE", + description: "In a schema there are several tables owned by different owners.", + fixes: &["change table owners to the same functional role"], + }), + "S003" => Some(RuleMetadata { + code: "S003", + name: "unsecuredPublicSchema", + scope: "SCHEMA", + description: "Only authorized users should be allowed to create objects.", + fixes: &["REVOKE CREATE ON SCHEMA FROM PUBLIC"], + }), + _ => None, + } +} diff --git a/crates/pgls_pglinter/src/rule.rs b/crates/pgls_pglinter/src/rule.rs new file mode 100644 index 000000000..43941a770 --- /dev/null +++ b/crates/pgls_pglinter/src/rule.rs @@ -0,0 +1,21 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use pgls_analyse::RuleMeta; +#[doc = r" Trait for pglinter (database-level) rules"] +#[doc = r""] +#[doc = r" Pglinter rules are different from linter rules:"] +#[doc = r" - They execute SQL queries against the database via pglinter extension"] +#[doc = r" - They don't have AST-based execution"] +#[doc = r" - Rule logic is in the pglinter Postgres extension"] +#[doc = r" - Threshold configuration (warning/error levels) is handled by pglinter extension"] +pub trait PglinterRule: RuleMeta { + #[doc = r#" Rule code (e.g., "B001", "S001", "C001")"#] + const CODE: &'static str; + #[doc = r" Rule scope (BASE, SCHEMA, or CLUSTER)"] + const SCOPE: &'static str; + #[doc = r" Description of what the rule detects"] + const DESCRIPTION: &'static str; + #[doc = r" Suggested fixes for violations"] + const FIXES: &'static [&'static str]; +} diff --git a/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs b/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs 
new file mode 100644 index 000000000..7a04cf2be --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/composite_primary_key_too_many_columns.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# CompositePrimaryKeyTooManyColumns (B012)\n\nDetect tables with composite primary keys involving more than 4 columns\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"compositePrimaryKeyTooManyColumns\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Consider redesigning the table to avoid composite primary keys with more than 4 columns\n- Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.\n\n## Documentation\n\nSee: "] pub CompositePrimaryKeyTooManyColumns { version : "1.0.0" , name : "compositePrimaryKeyTooManyColumns" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for CompositePrimaryKeyTooManyColumns { + const CODE: &'static str = "B012"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Detect tables with composite primary keys involving more than 4 columns"; + const FIXES: &'static [&'static str] = &[ + "Consider redesigning the table to avoid composite primary keys with more than 4 columns", + "Use surrogate keys (e.g., serial, UUID) instead of composite primary keys, and establish unique constraints on necessary column combinations, to enforce uniqueness.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs b/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs new file mode 100644 index 
000000000..8210f0068 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_objects_with_uppercase.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# HowManyObjectsWithUppercase (B005)\n\nCount number of objects with uppercase in name or in columns.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyObjectsWithUppercase\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Do not use uppercase for any database objects\n\n## Documentation\n\nSee: "] pub HowManyObjectsWithUppercase { version : "1.0.0" , name : "howManyObjectsWithUppercase" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyObjectsWithUppercase { + const CODE: &'static str = "B005"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of objects with uppercase in name or in columns."; + const FIXES: &'static [&'static str] = &["Do not use uppercase for any database objects"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs b/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs new file mode 100644 index 000000000..82ce8de0b --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_redudant_index.rs @@ -0,0 +1,13 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyRedudantIndex (B002)\n\nCount number of redundant index vs nb index.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyRedudantIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyRedudantIndex { version : "1.0.0" , name : "howManyRedudantIndex" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyRedudantIndex { + const CODE: &'static str = "B002"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of redundant index vs nb index."; + const FIXES: &'static [&'static str] = &[ + "remove duplicated index or check if a constraint does not create a redundant index, or change warning/error threshold", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs b/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs new file mode 100644 index 000000000..a737d529a --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_table_without_index_on_fk.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTableWithoutIndexOnFk (B003)\n\nCount number of tables without index on foreign key.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTableWithoutIndexOnFk\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- create a index on foreign key or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyTableWithoutIndexOnFk { version : "1.0.0" , name : "howManyTableWithoutIndexOnFk" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTableWithoutIndexOnFk { + const CODE: &'static str = "B003"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of tables without index on foreign key."; + const FIXES: &'static [&'static str] = + &["create a index on foreign key or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs b/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs new file mode 100644 index 000000000..b708f7055 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_table_without_primary_key.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTableWithoutPrimaryKey (B001)\n\nCount number of tables without primary key.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTableWithoutPrimaryKey\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- create a primary key or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyTableWithoutPrimaryKey { version : "1.0.0" , name : "howManyTableWithoutPrimaryKey" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTableWithoutPrimaryKey { + const CODE: &'static str = "B001"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of tables without primary key."; + const FIXES: &'static [&'static str] = + &["create a primary key or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs new file mode 100644 index 000000000..4721485d6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_never_selected.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesNeverSelected (B006)\n\nCount number of table(s) that has never been selected.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesNeverSelected\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Is it necessary to update/delete/insert rows in table(s) that are never selected ?\n\n## Documentation\n\nSee: "] pub HowManyTablesNeverSelected { version : "1.0.0" , name : "howManyTablesNeverSelected" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesNeverSelected { + const CODE: &'static str = "B006"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of table(s) that has never been selected."; + const FIXES: &'static [&'static str] = + &["Is it necessary to update/delete/insert rows in table(s) that are never selected ?"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs new file mode 100644 index 000000000..3e887c1d4 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_mismatch.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithFkMismatch (B008)\n\nCount number of tables with foreign keys that do not match the key reference type.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithFkMismatch\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- Consider column type adjustments to ensure foreign key matches referenced key type\n- ask a dba\n\n## Documentation\n\nSee: "] pub HowManyTablesWithFkMismatch { version : "1.0.0" , name : "howManyTablesWithFkMismatch" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithFkMismatch { + const CODE: &'static str = "B008"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables with foreign keys that do not match the key reference type."; + const FIXES: &'static [&'static str] = &[ + "Consider column type adjustments to ensure foreign key matches referenced key type", + "ask a dba", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs new file mode 100644 index 000000000..f25318891 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_fk_outside_schema.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithFkOutsideSchema (B007)\n\nCount number of tables with foreign keys outside their schema.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithFkOutsideSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Consider restructuring schema design to keep related tables in same schema\n- ask a dba\n\n## Documentation\n\nSee: "] pub HowManyTablesWithFkOutsideSchema { version : "1.0.0" , name : "howManyTablesWithFkOutsideSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithFkOutsideSchema { + const CODE: &'static str = "B007"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables with foreign keys outside their schema."; + const FIXES: &'static [&'static str] = &[ + "Consider restructuring schema design to keep related tables in same schema", + "ask a dba", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs new file mode 100644 index 000000000..7b7efda8f --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_reserved_keywords.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithReservedKeywords (B010)\n\nCount number of database objects using reserved keywords in their names.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithReservedKeywords\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- Rename database objects to avoid using reserved keywords.\n- Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.\n\n## Documentation\n\nSee: "] pub HowManyTablesWithReservedKeywords { version : "1.0.0" , name : "howManyTablesWithReservedKeywords" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithReservedKeywords { + const CODE: &'static str = "B010"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of database objects using reserved keywords in their names."; + const FIXES: &'static [&'static str] = &[ + "Rename database objects to avoid using reserved keywords.", + "Using reserved keywords can lead to SQL syntax errors and maintenance difficulties.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs new file mode 100644 index 000000000..70cef3a36 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_tables_with_same_trigger.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyTablesWithSameTrigger (B009)\n\nCount number of tables using the same trigger vs nb table with their own triggers.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyTablesWithSameTrigger\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- For more readability and other considerations use one trigger function per table.\n- Sharing the same trigger function add more complexity.\n\n## Documentation\n\nSee: "] pub HowManyTablesWithSameTrigger { version : "1.0.0" , name : "howManyTablesWithSameTrigger" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyTablesWithSameTrigger { + const CODE: &'static str = "B009"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "Count number of tables using the same trigger vs nb table with their own triggers."; + const FIXES: &'static [&'static str] = &[ + "For more readability and other considerations use one trigger function per table.", + "Sharing the same trigger function add more complexity.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs b/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs new file mode 100644 index 000000000..805fb5a16 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/how_many_unused_index.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# HowManyUnusedIndex (B004)\n\nCount number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"howManyUnusedIndex\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- remove unused index or change warning/error threshold\n\n## Documentation\n\nSee: "] pub HowManyUnusedIndex { version : "1.0.0" , name : "howManyUnusedIndex" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for HowManyUnusedIndex { + const CODE: &'static str = "B004"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = "Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)"; + const FIXES: &'static [&'static str] = + &["remove unused index or change warning/error threshold"]; +} diff --git a/crates/pgls_pglinter/src/rules/base/mod.rs b/crates/pgls_pglinter/src/rules/base/mod.rs new file mode 100644 index 000000000..c51a6b08d --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/mod.rs @@ -0,0 +1,16 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod composite_primary_key_too_many_columns; +pub mod how_many_objects_with_uppercase; +pub mod how_many_redudant_index; +pub mod how_many_table_without_index_on_fk; +pub mod how_many_table_without_primary_key; +pub mod how_many_tables_never_selected; +pub mod how_many_tables_with_fk_mismatch; +pub mod how_many_tables_with_fk_outside_schema; +pub mod how_many_tables_with_reserved_keywords; +pub mod how_many_tables_with_same_trigger; +pub mod how_many_unused_index; +pub mod several_table_owner_in_schema; +::pgls_analyse::declare_lint_group! 
{ pub Base { name : "base" , rules : [self :: composite_primary_key_too_many_columns :: CompositePrimaryKeyTooManyColumns , self :: how_many_objects_with_uppercase :: HowManyObjectsWithUppercase , self :: how_many_redudant_index :: HowManyRedudantIndex , self :: how_many_table_without_index_on_fk :: HowManyTableWithoutIndexOnFk , self :: how_many_table_without_primary_key :: HowManyTableWithoutPrimaryKey , self :: how_many_tables_never_selected :: HowManyTablesNeverSelected , self :: how_many_tables_with_fk_mismatch :: HowManyTablesWithFkMismatch , self :: how_many_tables_with_fk_outside_schema :: HowManyTablesWithFkOutsideSchema , self :: how_many_tables_with_reserved_keywords :: HowManyTablesWithReservedKeywords , self :: how_many_tables_with_same_trigger :: HowManyTablesWithSameTrigger , self :: how_many_unused_index :: HowManyUnusedIndex , self :: several_table_owner_in_schema :: SeveralTableOwnerInSchema ,] } } diff --git a/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs b/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs new file mode 100644 index 000000000..d42899411 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/base/several_table_owner_in_schema.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# SeveralTableOwnerInSchema (B011)\n\nIn a schema there are several tables owned by different owners.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"base\": {\n \"severalTableOwnerInSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- change table owners to the same functional role\n\n## Documentation\n\nSee: "] pub SeveralTableOwnerInSchema { version : "1.0.0" , name : "severalTableOwnerInSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SeveralTableOwnerInSchema { + const CODE: &'static str = "B011"; + const SCOPE: &'static str = "BASE"; + const DESCRIPTION: &'static str = + "In a schema there are several tables owned by different owners."; + const FIXES: &'static [&'static str] = &["change table owners to the same functional role"]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/mod.rs b/crates/pgls_pglinter/src/rules/cluster/mod.rs new file mode 100644 index 000000000..7e948dd5c --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/mod.rs @@ -0,0 +1,7 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod password_encryption_is_md5; +pub mod pg_hba_entries_with_method_trust_or_password_should_not_exists; +pub mod pg_hba_entries_with_method_trust_should_not_exists; +::pgls_analyse::declare_lint_group! 
{ pub Cluster { name : "cluster" , rules : [self :: password_encryption_is_md5 :: PasswordEncryptionIsMd5 , self :: pg_hba_entries_with_method_trust_or_password_should_not_exists :: PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists , self :: pg_hba_entries_with_method_trust_should_not_exists :: PgHbaEntriesWithMethodTrustShouldNotExists ,] } } diff --git a/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs b/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs new file mode 100644 index 000000000..7dad19a17 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/password_encryption_is_md5.rs @@ -0,0 +1,15 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# PasswordEncryptionIsMd5 (C003)\n\nThis configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"passwordEncryptionIsMd5\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = \n- scram-sha-256\n- ). Warning, you will need to reset all passwords after this parameter is updated.\n\n## Documentation\n\nSee: "] pub PasswordEncryptionIsMd5 { version : "1.0.0" , name : "passwordEncryptionIsMd5" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PasswordEncryptionIsMd5 { + const CODE: &'static str = "C003"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is not secure anymore and will prevent an upgrade to Postgres 18. 
Warning, you will need to reset all passwords after this is changed to scram-sha-256."; + const FIXES: &'static [&'static str] = &[ + "change password_encryption parameter to scram-sha-256 (ALTER SYSTEM SET password_encryption = ", + "scram-sha-256", + " ). Warning, you will need to reset all passwords after this parameter is updated.", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs new file mode 100644 index 000000000..8cbb7e5d6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_or_password_should_not_exists.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002)\n\nThis configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change trust or password method in pg_hba.conf\n\n## Documentation\n\nSee: "] pub PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists { version : "1.0.0" , name : "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists { + const CODE: &'static str = "C002"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."; + const FIXES: &'static [&'static str] = &["change trust or password method in pg_hba.conf"]; +} diff --git a/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs new file mode 100644 index 000000000..096ae3bbc --- /dev/null +++ b/crates/pgls_pglinter/src/rules/cluster/pg_hba_entries_with_method_trust_should_not_exists.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# PgHbaEntriesWithMethodTrustShouldNotExists (C001)\n\nThis configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"cluster\": {\n \"pgHbaEntriesWithMethodTrustShouldNotExists\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change trust method in pg_hba.conf\n\n## Documentation\n\nSee: "] pub PgHbaEntriesWithMethodTrustShouldNotExists { version : "1.0.0" , name : "pgHbaEntriesWithMethodTrustShouldNotExists" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for PgHbaEntriesWithMethodTrustShouldNotExists { + const CODE: &'static str = "C001"; + const SCOPE: &'static str = "CLUSTER"; + const DESCRIPTION: &'static str = "This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only."; + const FIXES: &'static [&'static str] = &["change trust method in pg_hba.conf"]; +} diff --git a/crates/pgls_pglinter/src/rules/mod.rs b/crates/pgls_pglinter/src/rules/mod.rs new file mode 100644 index 000000000..428719e86 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/mod.rs @@ -0,0 +1,7 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod base; +pub mod cluster; +pub mod schema; +::pgls_analyse::declare_category! 
{ pub PgLinter { kind : Lint , groups : [self :: base :: Base , self :: cluster :: Cluster , self :: schema :: Schema ,] } } diff --git a/crates/pgls_pglinter/src/rules/schema/mod.rs b/crates/pgls_pglinter/src/rules/schema/mod.rs new file mode 100644 index 000000000..772dfa25c --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/mod.rs @@ -0,0 +1,9 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +pub mod owner_schema_is_internal_role; +pub mod schema_owner_do_not_match_table_owner; +pub mod schema_prefixed_or_suffixed_with_envt; +pub mod schema_with_default_role_not_granted; +pub mod unsecured_public_schema; +::pgls_analyse::declare_lint_group! { pub Schema { name : "schema" , rules : [self :: owner_schema_is_internal_role :: OwnerSchemaIsInternalRole , self :: schema_owner_do_not_match_table_owner :: SchemaOwnerDoNotMatchTableOwner , self :: schema_prefixed_or_suffixed_with_envt :: SchemaPrefixedOrSuffixedWithEnvt , self :: schema_with_default_role_not_granted :: SchemaWithDefaultRoleNotGranted , self :: unsecured_public_schema :: UnsecuredPublicSchema ,] } } diff --git a/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs b/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs new file mode 100644 index 000000000..7abbc1d89 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/owner_schema_is_internal_role.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# OwnerSchemaIsInternalRole (S004)\n\nOwner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"ownerSchemaIsInternalRole\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- change schema owner to a functional role\n\n## Documentation\n\nSee: "] pub OwnerSchemaIsInternalRole { version : "1.0.0" , name : "ownerSchemaIsInternalRole" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for OwnerSchemaIsInternalRole { + const CODE: &'static str = "S004"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary)."; + const FIXES: &'static [&'static str] = &["change schema owner to a functional role"]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs b/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs new file mode 100644 index 000000000..8072bfa1d --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_owner_do_not_match_table_owner.rs @@ -0,0 +1,12 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# SchemaOwnerDoNotMatchTableOwner (S005)\n\nThe schema owner and tables in the schema do not match.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaOwnerDoNotMatchTableOwner\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 20%\n- Error level: 80%\n\n## Fixes\n\n- For maintenance facilities, schema and tables owners should be the same.\n\n## Documentation\n\nSee: "] pub SchemaOwnerDoNotMatchTableOwner { version : "1.0.0" , name : "schemaOwnerDoNotMatchTableOwner" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaOwnerDoNotMatchTableOwner { + const CODE: &'static str = "S005"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema owner and tables in the schema do not match."; + const FIXES: &'static [&'static str] = + &["For maintenance facilities, schema and tables owners should be the same."]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs b/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs new file mode 100644 index 000000000..4c6ad66a2 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_prefixed_or_suffixed_with_envt.rs @@ -0,0 +1,13 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# SchemaPrefixedOrSuffixedWithEnvt (S002)\n\nThe schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. 
It is possible, but it is never easy.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaPrefixedOrSuffixedWithEnvt\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 1%\n\n## Fixes\n\n- Keep the same schema name across environments. Prefer prefix or suffix the database name\n\n## Documentation\n\nSee: "] pub SchemaPrefixedOrSuffixedWithEnvt { version : "1.0.0" , name : "schemaPrefixedOrSuffixedWithEnvt" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaPrefixedOrSuffixedWithEnvt { + const CODE: &'static str = "S002"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy."; + const FIXES: &'static [&'static str] = &[ + "Keep the same schema name across environments. Prefer prefix or suffix the database name", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs b/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs new file mode 100644 index 000000000..eb7c221a8 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/schema_with_default_role_not_granted.rs @@ -0,0 +1,14 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! { # [doc = "# SchemaWithDefaultRoleNotGranted (S001)\n\nThe schema has no default role. Means that futur table will not be granted through a role. 
So you will have to re-execute grants on it.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"schemaWithDefaultRoleNotGranted\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 1%\n\n## Fixes\n\n- add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user \n\n## Documentation\n\nSee: "] pub SchemaWithDefaultRoleNotGranted { version : "1.0.0" , name : "schemaWithDefaultRoleNotGranted" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for SchemaWithDefaultRoleNotGranted { + const CODE: &'static str = "S001"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it."; + const FIXES: &'static [&'static str] = &[ + "add a default privilege=> ALTER DEFAULT PRIVILEGES IN SCHEMA for user ", + ]; +} diff --git a/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs b/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs new file mode 100644 index 000000000..b71069ff6 --- /dev/null +++ b/crates/pgls_pglinter/src/rules/schema/unsecured_public_schema.rs @@ -0,0 +1,11 @@ +//! Generated file, do not edit by hand, see `xtask/codegen` + +#![doc = r" Generated file, do not edit by hand, see `xtask/codegen`"] +use crate::rule::PglinterRule; +::pgls_analyse::declare_rule! 
{ # [doc = "# UnsecuredPublicSchema (S003)\n\nOnly authorized users should be allowed to create objects.\n\n## Configuration\n\nEnable or disable this rule in your configuration:\n\n```json\n{\n \"pglinter\": {\n \"rules\": {\n \"schema\": {\n \"unsecuredPublicSchema\": \"warn\"\n }\n }\n }\n}\n```\n\n## Thresholds\n\n- Warning level: 1%\n- Error level: 80%\n\n## Fixes\n\n- REVOKE CREATE ON SCHEMA FROM PUBLIC\n\n## Documentation\n\nSee: "] pub UnsecuredPublicSchema { version : "1.0.0" , name : "unsecuredPublicSchema" , severity : pgls_diagnostics :: Severity :: Warning , recommended : true , } } +impl PglinterRule for UnsecuredPublicSchema { + const CODE: &'static str = "S003"; + const SCOPE: &'static str = "SCHEMA"; + const DESCRIPTION: &'static str = "Only authorized users should be allowed to create objects."; + const FIXES: &'static [&'static str] = &["REVOKE CREATE ON SCHEMA FROM PUBLIC"]; +} diff --git a/crates/pgls_pglinter/src/sarif.rs b/crates/pgls_pglinter/src/sarif.rs new file mode 100644 index 000000000..d57d0979f --- /dev/null +++ b/crates/pgls_pglinter/src/sarif.rs @@ -0,0 +1,172 @@ +//! Generic SARIF (Static Analysis Results Interchange Format) parser +//! +//! SARIF is a standard format for static analysis tool output. +//! 
See: https://sarifweb.azurewebsites.net/ + +use serde::Deserialize; + +/// SARIF 2.1.0 root object +#[derive(Debug, Deserialize)] +pub struct SarifLog { + #[serde(default)] + pub runs: Vec, +} + +/// A single run of a static analysis tool +#[derive(Debug, Deserialize)] +pub struct Run { + #[serde(default)] + pub results: Vec, + pub tool: Option, +} + +/// Information about the tool that produced the results +#[derive(Debug, Deserialize)] +pub struct Tool { + pub driver: Option, +} + +/// The tool driver (main component) +#[derive(Debug, Deserialize)] +pub struct Driver { + pub name: Option, + pub version: Option, +} + +/// A single result from the analysis +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Result { + /// The rule ID that was violated + pub rule_id: Option, + /// Severity level: "error", "warning", "note", "none" + pub level: Option, + /// The result message + pub message: Option, + /// Locations where the issue was found + #[serde(default)] + pub locations: Vec, +} + +/// A message with text content +#[derive(Debug, Deserialize)] +pub struct Message { + pub text: Option, +} + +/// A location in the source +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Location { + pub physical_location: Option, + pub logical_locations: Option>, +} + +/// A physical location (file, line, column) +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PhysicalLocation { + pub artifact_location: Option, + pub region: Option, +} + +/// Location of an artifact (file) +#[derive(Debug, Deserialize)] +pub struct ArtifactLocation { + pub uri: Option, +} + +/// A region within a file +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Region { + pub start_line: Option, + pub start_column: Option, + pub end_line: Option, + pub end_column: Option, +} + +/// A logical location (schema, table, function name, etc.) 
+#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LogicalLocation { + pub name: Option, + pub fully_qualified_name: Option, + pub kind: Option, +} + +impl SarifLog { + /// Parse SARIF JSON into a structured log + pub fn parse(json: &str) -> std::result::Result { + serde_json::from_str(json) + } + + /// Get all results from all runs + pub fn all_results(&self) -> impl Iterator { + self.runs.iter().flat_map(|run| run.results.iter()) + } + + /// Check if there are any results + pub fn has_results(&self) -> bool { + self.runs.iter().any(|run| !run.results.is_empty()) + } +} + +impl Result { + /// Get the severity level, defaulting to "warning" + pub fn level_str(&self) -> &str { + self.level.as_deref().unwrap_or("warning") + } + + /// Get the message text, defaulting to empty string + pub fn message_text(&self) -> &str { + self.message + .as_ref() + .and_then(|m| m.text.as_deref()) + .unwrap_or("") + } + + /// Get logical location names (e.g., affected database objects) + pub fn logical_location_names(&self) -> Vec<&str> { + self.locations + .iter() + .filter_map(|loc| loc.logical_locations.as_ref()) + .flatten() + .filter_map(|ll| ll.fully_qualified_name.as_deref().or(ll.name.as_deref())) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_minimal_sarif() { + let json = r#"{ + "runs": [{ + "results": [{ + "ruleId": "B001", + "level": "warning", + "message": { "text": "Table without primary key" } + }] + }] + }"#; + + let log = SarifLog::parse(json).unwrap(); + assert!(log.has_results()); + + let results: Vec<_> = log.all_results().collect(); + assert_eq!(results.len(), 1); + assert_eq!(results[0].rule_id.as_deref(), Some("B001")); + assert_eq!(results[0].level_str(), "warning"); + assert_eq!(results[0].message_text(), "Table without primary key"); + } + + #[test] + fn test_parse_empty_sarif() { + let json = r#"{"runs": [{"results": []}]}"#; + let log = SarifLog::parse(json).unwrap(); + 
 assert!(!log.has_results()); + } +} diff --git a/crates/pgls_pglinter/tests/diagnostics.rs b/crates/pgls_pglinter/tests/diagnostics.rs new file mode 100644 index 000000000..abdd68bcd --- /dev/null +++ b/crates/pgls_pglinter/tests/diagnostics.rs @@ -0,0 +1,267 @@ +//! Integration tests for pglinter diagnostics +//! +//! These tests require the pglinter extension to be installed in the test database. + +use pgls_analyse::AnalysisFilter; +use pgls_console::fmt::{Formatter, HTML}; +use pgls_diagnostics::{Diagnostic, LogCategory, Visit}; +use pgls_pglinter::{PglinterCache, PglinterParams, run_pglinter}; +use pgls_schema_cache::SchemaCache; +use sqlx::PgPool; +use std::fmt::Write; +use std::io; + +struct TestVisitor { + logs: Vec<String>, +} + +impl TestVisitor { + fn new() -> Self { + Self { logs: Vec::new() } + } + + fn into_string(self) -> String { + self.logs.join("\n") + } +} + +impl Visit for TestVisitor { + fn record_log( + &mut self, + category: LogCategory, + text: &dyn pgls_console::fmt::Display, + ) -> io::Result<()> { + let prefix = match category { + LogCategory::None => "", + LogCategory::Info => "[Info] ", + LogCategory::Warn => "[Warn] ", + LogCategory::Error => "[Error] ", + }; + + let mut buffer = vec![]; + let mut writer = HTML::new(&mut buffer); + let mut formatter = Formatter::new(&mut writer); + text.fmt(&mut formatter)?; + + let text_str = String::from_utf8(buffer).unwrap(); + self.logs.push(format!("{prefix}{text_str}")); + Ok(()) + } +} + +struct TestSetup<'a> { + name: &'a str, + setup: &'a str, + test_db: &'a PgPool, +} + +impl TestSetup<'_> { + async fn test(self) { + // Load schema cache + let schema_cache = SchemaCache::load(self.test_db) + .await + .expect("Failed to load schema cache"); + + // Assert pglinter extension is installed + assert!( + schema_cache.extensions.iter().any(|e| e.name == "pglinter"), + "pglinter extension must be installed for tests to run" + ); + + // Run setup SQL + sqlx::raw_sql(self.setup) + .execute(self.test_db) + 
.await + .expect("Failed to setup test database"); + + // Reload schema cache after setup + let schema_cache = SchemaCache::load(self.test_db) + .await + .expect("Failed to reload schema cache"); + + // Load pglinter cache + let cache = PglinterCache::load(self.test_db, &schema_cache) + .await + .expect("Failed to load pglinter cache"); + + // Run pglinter checks with all rules enabled + let filter = AnalysisFilter::default(); + let diagnostics = run_pglinter( + PglinterParams { + conn: self.test_db, + schema_cache: &schema_cache, + }, + &filter, + Some(&cache), + ) + .await + .expect("Failed to run pglinter checks"); + + let content = if diagnostics.is_empty() { + String::from("No Diagnostics") + } else { + let mut result = String::new(); + + for (idx, diagnostic) in diagnostics.iter().enumerate() { + if idx > 0 { + writeln!(&mut result).unwrap(); + writeln!(&mut result, "---").unwrap(); + writeln!(&mut result).unwrap(); + } + + // Write category + let category_name = diagnostic.category().map(|c| c.name()).unwrap_or("unknown"); + writeln!(&mut result, "Category: {category_name}").unwrap(); + + // Write severity + writeln!(&mut result, "Severity: {:?}", diagnostic.severity()).unwrap(); + + // Write message + let mut msg_content = vec![]; + let mut writer = HTML::new(&mut msg_content); + let mut formatter = Formatter::new(&mut writer); + diagnostic.message(&mut formatter).unwrap(); + writeln!( + &mut result, + "Message: {}", + String::from_utf8(msg_content).unwrap() + ) + .unwrap(); + + // Write advices using custom visitor + let mut visitor = TestVisitor::new(); + diagnostic.advices(&mut visitor).unwrap(); + let advice_text = visitor.into_string(); + if !advice_text.is_empty() { + writeln!(&mut result, "Advices:\n{advice_text}").unwrap(); + } + } + + result + }; + + insta::with_settings!({ + prepend_module_to_snapshot => false, + }, { + insta::assert_snapshot!(self.name, content); + }); + } +} + +/// Test that checks extension availability +#[sqlx::test(migrator = 
"pgls_test_utils::MIGRATIONS")] +async fn extension_check(test_db: PgPool) { + let schema_cache = SchemaCache::load(&test_db) + .await + .expect("Failed to load schema cache"); + + assert!( + schema_cache.extensions.iter().any(|e| e.name == "pglinter"), + "pglinter extension must be installed for tests to run" + ); +} + +/// Test B001: Table without primary key +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn table_without_primary_key(test_db: PgPool) { + TestSetup { + name: "table_without_primary_key", + setup: r#" + CREATE TABLE public.test_no_pk ( + name text, + value integer + ); + "#, + test_db: &test_db, + } + .test() + .await; +} + +/// Test with a clean table (has primary key) +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn table_with_primary_key(test_db: PgPool) { + TestSetup { + name: "table_with_primary_key", + setup: r#" + CREATE TABLE public.test_with_pk ( + id serial PRIMARY KEY, + name text + ); + "#, + test_db: &test_db, + } + .test() + .await; +} + +/// Test B005: Objects with uppercase names +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn objects_with_uppercase(test_db: PgPool) { + TestSetup { + name: "objects_with_uppercase", + setup: r#" + CREATE TABLE public."TestTable" ( + id serial PRIMARY KEY, + "UserName" text + ); + "#, + test_db: &test_db, + } + .test() + .await; +} + +/// Test B003: Foreign key without index +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn fk_without_index(test_db: PgPool) { + TestSetup { + name: "fk_without_index", + setup: r#" + CREATE TABLE public.parent_table ( + id serial PRIMARY KEY, + name text + ); + + CREATE TABLE public.child_table ( + id serial PRIMARY KEY, + parent_id integer NOT NULL REFERENCES public.parent_table(id) + ); + "#, + test_db: &test_db, + } + .test() + .await; +} + +/// Test multiple issues at once +#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] +async fn multiple_issues(test_db: PgPool) { + TestSetup { + name: 
"multiple_issues", + setup: r#" + -- Table without primary key + CREATE TABLE public.no_pk ( + name text + ); + + -- Table with uppercase name + CREATE TABLE public."BadName" ( + id serial PRIMARY KEY + ); + + -- FK without index + CREATE TABLE public.ref_parent ( + id serial PRIMARY KEY + ); + + CREATE TABLE public.ref_child ( + id serial PRIMARY KEY, + parent_id integer REFERENCES public.ref_parent(id) + ); + "#, + test_db: &test_db, + } + .test() + .await; +} diff --git a/crates/pgls_workspace/src/settings.rs b/crates/pgls_workspace/src/settings.rs index 001361d0e..727cf25af 100644 --- a/crates/pgls_workspace/src/settings.rs +++ b/crates/pgls_workspace/src/settings.rs @@ -18,6 +18,7 @@ use pgls_configuration::{ diagnostics::InvalidIgnorePattern, files::FilesConfiguration, migrations::{MigrationsConfiguration, PartialMigrationsConfiguration}, + pglinter::PglinterConfiguration, plpgsql_check::PlPgSqlCheckConfiguration, splinter::SplinterConfiguration, }; @@ -217,6 +218,9 @@ pub struct Settings { /// Splinter (database linter) settings for the workspace pub splinter: SplinterSettings, + /// Pglinter (database linter via pglinter extension) settings for the workspace + pub pglinter: PglinterSettings, + /// Type checking settings for the workspace pub typecheck: TypecheckSettings, @@ -263,6 +267,11 @@ impl Settings { self.splinter = to_splinter_settings(SplinterConfiguration::from(splinter)); } + // pglinter part + if let Some(pglinter) = configuration.pglinter { + self.pglinter = to_pglinter_settings(PglinterConfiguration::from(pglinter)); + } + // typecheck part if let Some(typecheck) = configuration.typecheck { self.typecheck = to_typecheck_settings(TypecheckConfiguration::from(typecheck)); @@ -300,6 +309,11 @@ impl Settings { self.splinter.rules.as_ref().map(Cow::Borrowed) } + /// Returns pglinter rules. 
+ pub fn as_pglinter_rules(&self) -> Option> { + self.pglinter.rules.as_ref().map(Cow::Borrowed) + } + /// It retrieves the severity based on the `code` of the rule and the current configuration. /// /// The code of the has the following pattern: `{group}/{rule_name}`. @@ -335,6 +349,13 @@ fn to_splinter_settings(conf: SplinterConfiguration) -> SplinterSettings { } } +fn to_pglinter_settings(conf: PglinterConfiguration) -> PglinterSettings { + PglinterSettings { + enabled: conf.enabled, + rules: Some(conf.rules), + } +} + fn to_typecheck_settings(conf: TypecheckConfiguration) -> TypecheckSettings { TypecheckSettings { search_path: conf.search_path.into_iter().collect(), @@ -474,6 +495,25 @@ impl Default for SplinterSettings { } } +/// Pglinter (database linter via pglinter extension) settings for the entire workspace +#[derive(Debug)] +pub struct PglinterSettings { + /// Disabled by default (pglinter extension might not be installed) + pub enabled: bool, + + /// List of rules + pub rules: Option, +} + +impl Default for PglinterSettings { + fn default() -> Self { + Self { + enabled: false, // Disabled by default since pglinter extension might not be installed + rules: Some(pgls_configuration::pglinter::Rules::default()), + } + } +} + /// Type checking settings for the entire workspace #[derive(Debug)] pub struct PlPgSqlCheckSettings { diff --git a/docs/schema.json b/docs/schema.json index 71db27702..227437763 100644 --- a/docs/schema.json +++ b/docs/schema.json @@ -66,6 +66,17 @@ } ] }, + "pglinter": { + "description": "The configuration for pglinter", + "anyOf": [ + { + "$ref": "#/definitions/PglinterConfiguration" + }, + { + "type": "null" + } + ] + }, "plpgsqlCheck": { "description": "The configuration for type checking", "anyOf": [ @@ -113,6 +124,213 @@ }, "additionalProperties": false, "definitions": { + "Base": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules 
for this group.", + "type": [ + "boolean", + "null" + ] + }, + "compositePrimaryKeyTooManyColumns": { + "description": "CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyObjectsWithUppercase": { + "description": "HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyRedudantIndex": { + "description": "HowManyRedudantIndex (B002): Count number of redundant index vs nb index.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTableWithoutIndexOnFk": { + "description": "HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTableWithoutPrimaryKey": { + "description": "HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesNeverSelected": { + "description": "HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithFkMismatch": { + "description": "HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithFkOutsideSchema": { + "description": "HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema.", + "anyOf": [ + { + 
"$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithReservedKeywords": { + "description": "HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyTablesWithSameTrigger": { + "description": "HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "howManyUnusedIndex": { + "description": "HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.)", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "severalTableOwnerInSchema": { + "description": "SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "Cluster": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] + }, + "passwordEncryptionIsMd5": { + "description": "PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. 
Warning, you will need to reset all passwords after this is changed to scram-sha-256.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists": { + "description": "PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "pgHbaEntriesWithMethodTrustShouldNotExists": { + "description": "PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + } + }, + "additionalProperties": false + }, "DatabaseConfiguration": { "description": "The configuration of the database connection.", "type": "object", @@ -418,6 +636,80 @@ }, "additionalProperties": false }, + "PglinterConfiguration": { + "type": "object", + "properties": { + "enabled": { + "description": "if `false`, it disables the feature and the linter won't be executed. 
`true` by default", + "type": [ + "boolean", + "null" + ] + }, + "rules": { + "description": "List of rules", + "anyOf": [ + { + "$ref": "#/definitions/PglinterRules" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "PglinterRules": { + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules. The rules that belong to `nursery` won't be enabled.", + "type": [ + "boolean", + "null" + ] + }, + "base": { + "anyOf": [ + { + "$ref": "#/definitions/Base" + }, + { + "type": "null" + } + ] + }, + "cluster": { + "anyOf": [ + { + "$ref": "#/definitions/Cluster" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the lint rules recommended by Postgres Language Server. `true` by default.", + "type": [ + "boolean", + "null" + ] + }, + "schema": { + "anyOf": [ + { + "$ref": "#/definitions/Schema" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, "PlPgSqlCheckConfiguration": { "description": "The configuration for type checking.", "type": "object", @@ -878,6 +1170,82 @@ }, "additionalProperties": false }, + "Schema": { + "description": "A list of rules that belong to this group", + "type": "object", + "properties": { + "all": { + "description": "It enables ALL rules for this group.", + "type": [ + "boolean", + "null" + ] + }, + "ownerSchemaIsInternalRole": { + "description": "OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary).", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "recommended": { + "description": "It enables the recommended rules for this group", + "type": [ + "boolean", + "null" + ] + }, + "schemaOwnerDoNotMatchTableOwner": { + "description": "SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, 
+ { + "type": "null" + } + ] + }, + "schemaPrefixedOrSuffixedWithEnvt": { + "description": "SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "schemaWithDefaultRoleNotGranted": { + "description": "SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + }, + "unsecuredPublicSchema": { + "description": "UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects.", + "anyOf": [ + { + "$ref": "#/definitions/RuleConfiguration" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, "Security": { "description": "A list of rules that belong to this group", "type": "object", diff --git a/justfile b/justfile index 5dd54a224..524fe326c 100644 --- a/justfile +++ b/justfile @@ -29,6 +29,7 @@ gen-lint: cargo run -p xtask_codegen -- configuration cargo run -p xtask_codegen -- bindings cargo run -p xtask_codegen -- splinter + cargo run -p xtask_codegen -- pglinter cargo run -p rules_check cargo run -p docs_codegen just format diff --git a/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts b/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts index c6b93982d..57540a891 100644 --- a/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgres-language-server/backend-jsonrpc/src/workspace.ts @@ -95,6 +95,28 @@ export type Category = | "lint/safety/requireConcurrentIndexDeletion" | 
"lint/safety/runningStatementWhileHoldingAccessExclusive" | "lint/safety/transactionNesting" + | "pglinter/extensionNotInstalled" + | "pglinter/ruleDisabledInExtension" + | "pglinter/base/compositePrimaryKeyTooManyColumns" + | "pglinter/base/howManyObjectsWithUppercase" + | "pglinter/base/howManyRedudantIndex" + | "pglinter/base/howManyTableWithoutIndexOnFk" + | "pglinter/base/howManyTableWithoutPrimaryKey" + | "pglinter/base/howManyTablesNeverSelected" + | "pglinter/base/howManyTablesWithFkMismatch" + | "pglinter/base/howManyTablesWithFkOutsideSchema" + | "pglinter/base/howManyTablesWithReservedKeywords" + | "pglinter/base/howManyTablesWithSameTrigger" + | "pglinter/base/howManyUnusedIndex" + | "pglinter/base/severalTableOwnerInSchema" + | "pglinter/cluster/passwordEncryptionIsMd5" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + | "pglinter/schema/ownerSchemaIsInternalRole" + | "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + | "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + | "pglinter/schema/schemaWithDefaultRoleNotGranted" + | "pglinter/schema/unsecuredPublicSchema" | "splinter/performance/authRlsInitplan" | "splinter/performance/duplicateIndex" | "splinter/performance/multiplePermissivePolicies" @@ -135,7 +157,11 @@ export type Category = | "lint/safety" | "splinter" | "splinter/performance" - | "splinter/security"; + | "splinter/security" + | "pglinter" + | "pglinter/base" + | "pglinter/cluster" + | "pglinter/schema"; export interface Location { path?: Resource_for_String; sourceCode?: string; @@ -310,6 +336,10 @@ export interface PartialConfiguration { * Configure migrations */ migrations?: PartialMigrationsConfiguration; + /** + * The configuration for pglinter + */ + pglinter?: PartialPglinterConfiguration; /** * The configuration for type checking */ @@ -410,6 +440,16 @@ export interface PartialMigrationsConfiguration { */ migrationsDir?: string; } 
+export interface PartialPglinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: PglinterRules; +} /** * The configuration for type checking. */ @@ -480,6 +520,19 @@ export interface LinterRules { recommended?: boolean; safety?: Safety; } +export interface PglinterRules { + /** + * It enables ALL rules. The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + base?: Base; + cluster?: Cluster; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + schema?: Schema; +} export interface SplinterRules { /** * It enables ALL rules. The rules that belong to `nursery` won't be enabled. @@ -638,6 +691,125 @@ export interface Safety { */ transactionNesting?: RuleConfiguration_for_Null; } +/** + * A list of rules that belong to this group + */ +export interface Base { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns + */ + compositePrimaryKeyTooManyColumns?: RuleConfiguration_for_Null; + /** + * HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns. + */ + howManyObjectsWithUppercase?: RuleConfiguration_for_Null; + /** + * HowManyRedudantIndex (B002): Count number of redundant index vs nb index. + */ + howManyRedudantIndex?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key. + */ + howManyTableWithoutIndexOnFk?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key. + */ + howManyTableWithoutPrimaryKey?: RuleConfiguration_for_Null; + /** + * HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected. 
+ */ + howManyTablesNeverSelected?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type. + */ + howManyTablesWithFkMismatch?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema. + */ + howManyTablesWithFkOutsideSchema?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names. + */ + howManyTablesWithReservedKeywords?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers. + */ + howManyTablesWithSameTrigger?: RuleConfiguration_for_Null; + /** + * HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.) + */ + howManyUnusedIndex?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners. + */ + severalTableOwnerInSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Cluster { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256. + */ + passwordEncryptionIsMd5?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. 
In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustShouldNotExists?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; +} +/** + * A list of rules that belong to this group + */ +export interface Schema { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary). + */ + ownerSchemaIsInternalRole?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match. + */ + schemaOwnerDoNotMatchTableOwner?: RuleConfiguration_for_Null; + /** + * SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy. + */ + schemaPrefixedOrSuffixedWithEnvt?: RuleConfiguration_for_Null; + /** + * SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it. 
+ */ + schemaWithDefaultRoleNotGranted?: RuleConfiguration_for_Null; + /** + * UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects. + */ + unsecuredPublicSchema?: RuleConfiguration_for_Null; +} /** * A list of rules that belong to this group */ diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts index c6b93982d..57540a891 100644 --- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts +++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts @@ -95,6 +95,28 @@ export type Category = | "lint/safety/requireConcurrentIndexDeletion" | "lint/safety/runningStatementWhileHoldingAccessExclusive" | "lint/safety/transactionNesting" + | "pglinter/extensionNotInstalled" + | "pglinter/ruleDisabledInExtension" + | "pglinter/base/compositePrimaryKeyTooManyColumns" + | "pglinter/base/howManyObjectsWithUppercase" + | "pglinter/base/howManyRedudantIndex" + | "pglinter/base/howManyTableWithoutIndexOnFk" + | "pglinter/base/howManyTableWithoutPrimaryKey" + | "pglinter/base/howManyTablesNeverSelected" + | "pglinter/base/howManyTablesWithFkMismatch" + | "pglinter/base/howManyTablesWithFkOutsideSchema" + | "pglinter/base/howManyTablesWithReservedKeywords" + | "pglinter/base/howManyTablesWithSameTrigger" + | "pglinter/base/howManyUnusedIndex" + | "pglinter/base/severalTableOwnerInSchema" + | "pglinter/cluster/passwordEncryptionIsMd5" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists" + | "pglinter/cluster/pgHbaEntriesWithMethodTrustShouldNotExists" + | "pglinter/schema/ownerSchemaIsInternalRole" + | "pglinter/schema/schemaOwnerDoNotMatchTableOwner" + | "pglinter/schema/schemaPrefixedOrSuffixedWithEnvt" + | "pglinter/schema/schemaWithDefaultRoleNotGranted" + | "pglinter/schema/unsecuredPublicSchema" | "splinter/performance/authRlsInitplan" | "splinter/performance/duplicateIndex" | "splinter/performance/multiplePermissivePolicies" @@ -135,7 
+157,11 @@ export type Category = | "lint/safety" | "splinter" | "splinter/performance" - | "splinter/security"; + | "splinter/security" + | "pglinter" + | "pglinter/base" + | "pglinter/cluster" + | "pglinter/schema"; export interface Location { path?: Resource_for_String; sourceCode?: string; @@ -310,6 +336,10 @@ export interface PartialConfiguration { * Configure migrations */ migrations?: PartialMigrationsConfiguration; + /** + * The configuration for pglinter + */ + pglinter?: PartialPglinterConfiguration; /** * The configuration for type checking */ @@ -410,6 +440,16 @@ export interface PartialMigrationsConfiguration { */ migrationsDir?: string; } +export interface PartialPglinterConfiguration { + /** + * if `false`, it disables the feature and the linter won't be executed. `true` by default + */ + enabled?: boolean; + /** + * List of rules + */ + rules?: PglinterRules; +} /** * The configuration for type checking. */ @@ -480,6 +520,19 @@ export interface LinterRules { recommended?: boolean; safety?: Safety; } +export interface PglinterRules { + /** + * It enables ALL rules. The rules that belong to `nursery` won't be enabled. + */ + all?: boolean; + base?: Base; + cluster?: Cluster; + /** + * It enables the lint rules recommended by Postgres Language Server. `true` by default. + */ + recommended?: boolean; + schema?: Schema; +} export interface SplinterRules { /** * It enables ALL rules. The rules that belong to `nursery` won't be enabled. @@ -638,6 +691,125 @@ export interface Safety { */ transactionNesting?: RuleConfiguration_for_Null; } +/** + * A list of rules that belong to this group + */ +export interface Base { + /** + * It enables ALL rules for this group. 
+ */ + all?: boolean; + /** + * CompositePrimaryKeyTooManyColumns (B012): Detect tables with composite primary keys involving more than 4 columns + */ + compositePrimaryKeyTooManyColumns?: RuleConfiguration_for_Null; + /** + * HowManyObjectsWithUppercase (B005): Count number of objects with uppercase in name or in columns. + */ + howManyObjectsWithUppercase?: RuleConfiguration_for_Null; + /** + * HowManyRedudantIndex (B002): Count number of redundant index vs nb index. + */ + howManyRedudantIndex?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutIndexOnFk (B003): Count number of tables without index on foreign key. + */ + howManyTableWithoutIndexOnFk?: RuleConfiguration_for_Null; + /** + * HowManyTableWithoutPrimaryKey (B001): Count number of tables without primary key. + */ + howManyTableWithoutPrimaryKey?: RuleConfiguration_for_Null; + /** + * HowManyTablesNeverSelected (B006): Count number of table(s) that has never been selected. + */ + howManyTablesNeverSelected?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkMismatch (B008): Count number of tables with foreign keys that do not match the key reference type. + */ + howManyTablesWithFkMismatch?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithFkOutsideSchema (B007): Count number of tables with foreign keys outside their schema. + */ + howManyTablesWithFkOutsideSchema?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithReservedKeywords (B010): Count number of database objects using reserved keywords in their names. + */ + howManyTablesWithReservedKeywords?: RuleConfiguration_for_Null; + /** + * HowManyTablesWithSameTrigger (B009): Count number of tables using the same trigger vs nb table with their own triggers. + */ + howManyTablesWithSameTrigger?: RuleConfiguration_for_Null; + /** + * HowManyUnusedIndex (B004): Count number of unused index vs nb index (base on pg_stat_user_indexes, indexes associated to unique constraints are discard.) 
+ */ + howManyUnusedIndex?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SeveralTableOwnerInSchema (B011): In a schema there are several tables owned by different owners. + */ + severalTableOwnerInSchema?: RuleConfiguration_for_Null; +} +/** + * A list of rules that belong to this group + */ +export interface Cluster { + /** + * It enables ALL rules for this group. + */ + all?: boolean; + /** + * PasswordEncryptionIsMd5 (C003): This configuration is not secure anymore and will prevent an upgrade to Postgres 18. Warning, you will need to reset all passwords after this is changed to scram-sha-256. + */ + passwordEncryptionIsMd5?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustOrPasswordShouldNotExists (C002): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustOrPasswordShouldNotExists?: RuleConfiguration_for_Null; + /** + * PgHbaEntriesWithMethodTrustShouldNotExists (C001): This configuration is extremely insecure and should only be used in a controlled, non-production environment for testing purposes. In a production environment, you should use more secure authentication methods such as md5, scram-sha-256, or cert, and restrict access to trusted IP addresses only. + */ + pgHbaEntriesWithMethodTrustShouldNotExists?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; +} +/** + * A list of rules that belong to this group + */ +export interface Schema { + /** + * It enables ALL rules for this group. 
+ */ + all?: boolean; + /** + * OwnerSchemaIsInternalRole (S004): Owner of schema should not be any internal pg roles, or owner is a superuser (not sure it is necesary). + */ + ownerSchemaIsInternalRole?: RuleConfiguration_for_Null; + /** + * It enables the recommended rules for this group + */ + recommended?: boolean; + /** + * SchemaOwnerDoNotMatchTableOwner (S005): The schema owner and tables in the schema do not match. + */ + schemaOwnerDoNotMatchTableOwner?: RuleConfiguration_for_Null; + /** + * SchemaPrefixedOrSuffixedWithEnvt (S002): The schema is prefixed with one of staging,stg,preprod,prod,sandbox,sbox string. Means that when you refresh your preprod, staging environments from production, you have to rename the target schema from prod_ to stg_ or something like. It is possible, but it is never easy. + */ + schemaPrefixedOrSuffixedWithEnvt?: RuleConfiguration_for_Null; + /** + * SchemaWithDefaultRoleNotGranted (S001): The schema has no default role. Means that futur table will not be granted through a role. So you will have to re-execute grants on it. + */ + schemaWithDefaultRoleNotGranted?: RuleConfiguration_for_Null; + /** + * UnsecuredPublicSchema (S003): Only authorized users should be allowed to create objects. 
+ */ + unsecuredPublicSchema?: RuleConfiguration_for_Null; +} /** * A list of rules that belong to this group */ diff --git a/xtask/codegen/Cargo.toml b/xtask/codegen/Cargo.toml index c5e95ebe4..24898d319 100644 --- a/xtask/codegen/Cargo.toml +++ b/xtask/codegen/Cargo.toml @@ -16,9 +16,11 @@ pgls_analyse = { workspace = true } pgls_analyser = { workspace = true } pgls_diagnostics = { workspace = true } pgls_env = { workspace = true } +pgls_pglinter = { workspace = true } pgls_splinter = { workspace = true } pgls_workspace = { workspace = true, features = ["schema"] } proc-macro2 = { workspace = true, features = ["span-locations"] } pulldown-cmark = { version = "0.12.2" } quote = "1.0.36" +regex = "1.11" xtask = { path = '../', version = "0.0" } diff --git a/xtask/codegen/src/generate_configuration.rs b/xtask/codegen/src/generate_configuration.rs index 0f243909e..d4a699c11 100644 --- a/xtask/codegen/src/generate_configuration.rs +++ b/xtask/codegen/src/generate_configuration.rs @@ -81,7 +81,7 @@ const TOOLS: &[ToolConfig] = &[ ToolConfig::new("linter", RuleCategory::Lint, true), ToolConfig::new("assists", RuleCategory::Action, true), ToolConfig::new("splinter", RuleCategory::Lint, false), // Database linter, doesn't handle files - ToolConfig::new("pglinter", RuleCategory::Lint, true), + ToolConfig::new("pglinter", RuleCategory::Lint, false), // Database linter via pglinter extension ]; /// Visitor that collects rules for a specific category @@ -121,6 +121,7 @@ impl RegistryVisitor for CategoryRulesVisitor { pub fn generate_rules_configuration(mode: Mode) -> Result<()> { generate_tool_configuration(mode, "linter")?; generate_tool_configuration(mode, "splinter")?; + generate_tool_configuration(mode, "pglinter")?; Ok(()) } @@ -140,8 +141,8 @@ pub fn generate_tool_configuration(mode: Mode, tool_name: &str) -> Result<()> { match tool.name { "linter" => pgls_analyser::visit_registry(&mut visitor), "splinter" => pgls_splinter::registry::visit_registry(&mut visitor), + 
"pglinter" => pgls_pglinter::registry::visit_registry(&mut visitor), "assists" => unimplemented!("Assists rules not yet implemented"), - "pglinter" => unimplemented!("PGLinter rules not yet implemented"), _ => unreachable!(), } @@ -596,9 +597,12 @@ fn generate_lint_group_struct( } // For splinter rules, use SplinterRuleOptions for the shared ignore patterns + // For pglinter rules, use () as options since they don't have configurable options // For linter rules, use pgls_analyser::options::#rule_name let rule_option_type = if tool_name == "splinter" { quote! { crate::splinter::SplinterRuleOptions } + } else if tool_name == "pglinter" { + quote! { () } } else { quote! { pgls_analyser::options::#rule_name } }; diff --git a/xtask/codegen/src/generate_pglinter.rs b/xtask/codegen/src/generate_pglinter.rs new file mode 100644 index 000000000..bbf4289ab --- /dev/null +++ b/xtask/codegen/src/generate_pglinter.rs @@ -0,0 +1,687 @@ +use anyhow::{Context, Result}; +use biome_string_case::Case; +use quote::{format_ident, quote}; +use regex::Regex; +use std::collections::BTreeMap; +use std::path::Path; +use xtask::{glue::fs2, project_root, Mode}; + +use crate::update; + +/// Metadata extracted from rules.sql INSERT statements +#[derive(Debug, Clone)] +struct PglinterRuleMeta { + /// Rule name in PascalCase (e.g., "HowManyTableWithoutPrimaryKey") + name: String, + /// Rule name in snake_case (e.g., "how_many_table_without_primary_key") + snake_name: String, + /// Rule name in camelCase (e.g., "howManyTableWithoutPrimaryKey") + camel_name: String, + /// Rule code (e.g., "B001") + code: String, + /// Scope: BASE, SCHEMA, or CLUSTER + scope: String, + /// Description of the rule + description: String, + /// Message template with placeholders + message: String, + /// Suggested fixes + fixes: Vec, + /// Warning threshold percentage + warning_level: i32, + /// Error threshold percentage + error_level: i32, +} + +/// Parse pglinter rules from rules.sql and generate Rust code +pub fn 
generate_pglinter() -> Result<()> { + let rules_sql_path = project_root().join("pglinter_repo/sql/rules.sql"); + + if !rules_sql_path.exists() { + anyhow::bail!( + "pglinter_repo/sql/rules.sql not found. Clone pglinter repo first: git clone https://github.com/pmpetit/pglinter pglinter_repo" + ); + } + + let sql_content = fs2::read_to_string(&rules_sql_path)?; + let rules = parse_rules_sql(&sql_content)?; + + // Generate rule files + generate_rule_trait()?; + generate_rule_files(&rules)?; + generate_registry(&rules)?; + update_categories_file(&rules)?; + + Ok(()) +} + +/// Parse INSERT statements from rules.sql to extract rule metadata +fn parse_rules_sql(content: &str) -> Result<BTreeMap<String, PglinterRuleMeta>> { + let mut rules = BTreeMap::new(); + + // Normalize the content: remove newlines within parentheses to make regex easier + // This handles multi-line ARRAY declarations + let normalized = normalize_sql_values(content); + + // Use regex to find value tuples + // Pattern: ('Name', 'CODE', num, num, 'SCOPE', 'desc', 'msg', ARRAY[...]) + let value_pattern = Regex::new( + r#"\(\s*'([^']+)',\s*'([^']+)',\s*(\d+),\s*(\d+),\s*'([^']+)',\s*'([^']+)',\s*'([^']+)',\s*ARRAY\s*\[(.*?)\]\s*\)"#, + )?; + + for caps in value_pattern.captures_iter(&normalized) { + let name = caps.get(1).unwrap().as_str().to_string(); + let code = caps.get(2).unwrap().as_str().to_string(); + let warning_level: i32 = caps.get(3).unwrap().as_str().parse()?; + let error_level: i32 = caps.get(4).unwrap().as_str().parse()?; + let scope = caps.get(5).unwrap().as_str().to_string(); + let description = caps + .get(6) + .unwrap() + .as_str() + .replace("''", "'") // Unescape single quotes + .to_string(); + let message = caps.get(7).unwrap().as_str().to_string(); + let fixes_str = caps.get(8).unwrap().as_str(); + + // Parse fixes array + let fixes: Vec<String> = parse_fixes_array(fixes_str); + + let snake_name = Case::Snake.convert(&name); + let camel_name = to_camel_case(&name); + + let meta = PglinterRuleMeta { + name, + snake_name:
snake_name.clone(), + camel_name, + code, + scope, + description, + message, + fixes, + warning_level, + error_level, + }; + + rules.insert(snake_name, meta); + } + + if rules.is_empty() { + anyhow::bail!("No rules found in rules.sql. Check the file format."); + } + + Ok(rules) +} + +/// Normalize SQL content by joining lines within value tuples +fn normalize_sql_values(content: &str) -> String { + let mut result = String::new(); + let mut in_value = false; + let mut paren_depth = 0; + + for c in content.chars() { + match c { + '(' => { + paren_depth += 1; + in_value = true; + result.push(c); + } + ')' => { + paren_depth -= 1; + if paren_depth == 0 { + in_value = false; + } + result.push(c); + } + '\n' | '\r' if in_value => { + result.push(' '); // Replace newlines with spaces inside values + } + _ => result.push(c), + } + } + + result +} + +/// Parse ARRAY['fix1', 'fix2'] into Vec<String> +fn parse_fixes_array(s: &str) -> Vec<String> { + let fix_pattern = Regex::new(r#"'([^']+)'"#).unwrap(); + fix_pattern + .captures_iter(s) + .map(|cap| cap.get(1).unwrap().as_str().to_string()) + .collect() +} + +/// Convert PascalCase to camelCase +fn to_camel_case(s: &str) -> String { + let mut chars = s.chars(); + match chars.next() { + None => String::new(), + Some(first) => first.to_lowercase().collect::<String>() + chars.as_str(), + } +} + +/// Map scope to category directory name +fn scope_to_category(scope: &str) -> &'static str { + match scope { + "BASE" => "base", + "SCHEMA" => "schema", + "CLUSTER" => "cluster", + _ => "base", + } +} + +/// Generate src/rule.rs with PglinterRule trait +fn generate_rule_trait() -> Result<()> { + let rule_path = project_root().join("crates/pgls_pglinter/src/rule.rs"); + + let content = quote! { + //!
Generated file, do not edit by hand, see `xtask/codegen` + + use pgls_analyse::RuleMeta; + + /// Trait for pglinter (database-level) rules + /// + /// Pglinter rules are different from linter rules: + /// - They execute SQL queries against the database via pglinter extension + /// - They don't have AST-based execution + /// - Rule logic is in the pglinter Postgres extension + /// - Threshold configuration (warning/error levels) is handled by pglinter extension + pub trait PglinterRule: RuleMeta { + /// Rule code (e.g., "B001", "S001", "C001") + const CODE: &'static str; + + /// Rule scope (BASE, SCHEMA, or CLUSTER) + const SCOPE: &'static str; + + /// Description of what the rule detects + const DESCRIPTION: &'static str; + + /// Suggested fixes for violations + const FIXES: &'static [&'static str]; + } + }; + + let formatted = xtask::reformat(content)?; + update(&rule_path, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate rule files in src/rules/{category}/{rule_name}.rs +fn generate_rule_files(rules: &BTreeMap<String, PglinterRuleMeta>) -> Result<()> { + let rules_dir = project_root().join("crates/pgls_pglinter/src/rules"); + + // Group rules by scope/category + let mut rules_by_category: BTreeMap<String, Vec<&PglinterRuleMeta>> = BTreeMap::new(); + for rule in rules.values() { + let category = scope_to_category(&rule.scope).to_string(); + rules_by_category.entry(category).or_default().push(rule); + } + + // Generate category directories and files + for (category, category_rules) in &rules_by_category { + let category_dir = rules_dir.join(category); + fs2::create_dir_all(&category_dir)?; + + // Generate individual rule files + for rule in category_rules { + generate_rule_file(&category_dir, rule)?; + } + + // Generate category mod.rs + generate_category_mod(&category_dir, category, category_rules)?; + } + + // Generate main rules/mod.rs + generate_rules_mod(&rules_dir, &rules_by_category)?; + + Ok(()) +} + +/// Generate individual rule file +fn generate_rule_file(category_dir: &Path, rule:
&PglinterRuleMeta) -> Result<()> { + let rule_file = category_dir.join(format!("{}.rs", rule.snake_name)); + + let struct_name = format_ident!("{}", rule.name); + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let warning_level = rule.warning_level; + let error_level = rule.error_level; + let category = scope_to_category(&rule.scope); + + // Create fixes as static slice + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + // Build doc string + let doc_string = format!( + r#"# {} ({}) + +{} + +## Configuration + +Enable or disable this rule in your configuration: + +```json +{{ + "pglinter": {{ + "rules": {{ + "{}": {{ + "{}": "warn" + }} + }} + }} +}} +``` + +## Thresholds + +- Warning level: {}% +- Error level: {}% + +## Fixes + +{} + +## Documentation + +See: https://github.com/pmpetit/pglinter#{}"#, + rule.name, + code, + description, + category, + camel_name, + warning_level, + error_level, + rule.fixes + .iter() + .map(|f| format!("- {f}")) + .collect::<Vec<_>>() + .join("\n"), + code.to_lowercase(), + ); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + use crate::rule::PglinterRule; + + ::pgls_analyse::declare_rule!
{ + #[doc = #doc_string] + pub #struct_name { + version: "1.0.0", + name: #camel_name, + severity: pgls_diagnostics::Severity::Warning, + recommended: true, + } + } + + impl PglinterRule for #struct_name { + const CODE: &'static str = #code; + const SCOPE: &'static str = #scope; + const DESCRIPTION: &'static str = #description; + const FIXES: &'static [&'static str] = &[#(#fixes),*]; + } + }; + + let formatted = xtask::reformat(content)?; + update(&rule_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate category mod.rs that exports all rules +fn generate_category_mod( + category_dir: &Path, + category: &str, + rules: &[&PglinterRuleMeta], +) -> Result<()> { + let mod_file = category_dir.join("mod.rs"); + + let category_title = Case::Pascal.convert(category); + let category_struct = format_ident!("{}", category_title); + + // Generate mod declarations + let mod_names: Vec<_> = rules + .iter() + .map(|r| format_ident!("{}", r.snake_name)) + .collect(); + + // Generate rule paths for declare_lint_group! + let rule_paths: Vec<_> = rules + .iter() + .map(|r| { + let mod_name = format_ident!("{}", r.snake_name); + let struct_name = format_ident!("{}", r.name); + quote! { self::#mod_name::#struct_name } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + #( pub mod #mod_names; )* + + ::pgls_analyse::declare_lint_group! { + pub #category_struct { + name: #category, + rules: [ + #( #rule_paths, )* + ] + } + } + }; + + let formatted = xtask::reformat(content)?; + update(&mod_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate main rules/mod.rs +fn generate_rules_mod( + rules_dir: &Path, + rules_by_category: &BTreeMap<String, Vec<&PglinterRuleMeta>>, +) -> Result<()> { + let mod_file = rules_dir.join("mod.rs"); + + let category_mods: Vec<_> = rules_by_category + .keys() + .map(|cat| { + let mod_name = format_ident!("{}", cat); + quote!
{ pub mod #mod_name; } + }) + .collect(); + + // Generate group paths for declare_category! + let group_paths: Vec<_> = rules_by_category + .keys() + .map(|cat| { + let mod_name = format_ident!("{}", cat); + let group_name = format_ident!("{}", Case::Pascal.convert(cat)); + quote! { self::#mod_name::#group_name } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + #( #category_mods )* + + ::pgls_analyse::declare_category! { + pub PgLinter { + kind: Lint, + groups: [ + #( #group_paths, )* + ] + } + } + }; + + let formatted = xtask::reformat(content)?; + update(&mod_file, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Generate src/registry.rs with visit_registry() and get_rule_category() +fn generate_registry(rules: &BTreeMap<String, PglinterRuleMeta>) -> Result<()> { + let registry_path = project_root().join("crates/pgls_pglinter/src/registry.rs"); + + // Generate match arms for rule code lookup (camelCase → code) + let code_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + quote! { + #camel_name => Some(#code) + } + }) + .collect(); + + // Generate match arms for category lookup (code → &'static Category) + let category_arms: Vec<_> = rules + .values() + .map(|rule| { + let code = &rule.code; + let category = scope_to_category(&rule.scope); + let camel_name = &rule.camel_name; + let category_path = format!("pglinter/{category}/{camel_name}"); + + quote! { + #code => Some(::pgls_diagnostics::category!(#category_path)) + } + }) + .collect(); + + // Generate match arms for rule metadata lookup by name + let metadata_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + quote!
{ + #camel_name => Some(RuleMetadata { + code: #code, + name: #camel_name, + scope: #scope, + description: #description, + fixes: &[#(#fixes),*], + }) + } + }) + .collect(); + + // Generate match arms for rule metadata lookup by code + let metadata_by_code_arms: Vec<_> = rules + .values() + .map(|rule| { + let camel_name = &rule.camel_name; + let code = &rule.code; + let scope = &rule.scope; + let description = &rule.description; + let fixes: Vec<&str> = rule.fixes.iter().map(|s| s.as_str()).collect(); + + quote! { + #code => Some(RuleMetadata { + code: #code, + name: #camel_name, + scope: #scope, + description: #description, + fixes: &[#(#fixes),*], + }) + } + }) + .collect(); + + let content = quote! { + //! Generated file, do not edit by hand, see `xtask/codegen` + + use pgls_analyse::RegistryVisitor; + use pgls_diagnostics::Category; + + /// Metadata for a pglinter rule + #[derive(Debug, Clone, Copy)] + pub struct RuleMetadata { + /// Rule code (e.g., "B001") + pub code: &'static str, + /// Rule name in camelCase + pub name: &'static str, + /// Rule scope (BASE, SCHEMA, CLUSTER) + pub scope: &'static str, + /// Description of what the rule detects + pub description: &'static str, + /// Suggested fixes + pub fixes: &'static [&'static str], + } + + /// Visit all pglinter rules using the visitor pattern + pub fn visit_registry<V: RegistryVisitor>(registry: &mut V) { + registry.record_category::<crate::rules::PgLinter>(); + } + + /// Get the pglinter rule code from the camelCase name + pub fn get_rule_code(name: &str) -> Option<&'static str> { + match name { + #( #code_arms, )* + _ => None, + } + } + + /// Get the diagnostic category for a rule code + pub fn get_rule_category(code: &str) -> Option<&'static Category> { + match code { + #( #category_arms, )* + _ => None, + } + } + + /// Get rule metadata by name (camelCase) + pub fn get_rule_metadata(name: &str) -> Option<RuleMetadata> { + match name { + #( #metadata_arms, )* + _ => None, + } + } + + /// Get rule metadata by code (e.g., "B001", "S001", "C001") + pub fn
get_rule_metadata_by_code(code: &str) -> Option<RuleMetadata> { + match code { + #( #metadata_by_code_arms, )* + _ => None, + } + } + }; + + let formatted = xtask::reformat(content)?; + update(&registry_path, &formatted, &Mode::Overwrite)?; + + Ok(()) +} + +/// Update the categories.rs file with pglinter rules +fn update_categories_file(rules: &BTreeMap<String, PglinterRuleMeta>) -> Result<()> { + let categories_path = + project_root().join("crates/pgls_diagnostics_categories/src/categories.rs"); + + let mut content = fs2::read_to_string(&categories_path)?; + + // Generate pglinter rule entries grouped by category + let mut pglinter_rules: Vec<(String, String)> = rules + .values() + .map(|rule| { + let category = scope_to_category(&rule.scope); + let url = format!( + "https://github.com/pmpetit/pglinter#{}", + rule.code.to_lowercase() + ); + + ( + category.to_string(), + format!( + " \"pglinter/{}/{}\": \"{}\",", + category, rule.camel_name, url + ), + ) + }) + .collect(); + + // Sort by category, then by entry + pglinter_rules.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| a.1.cmp(&b.1))); + + // Add meta diagnostics at the start + let mut all_entries = vec![ + " // Meta diagnostics".to_string(), + " \"pglinter/extensionNotInstalled\": \"Install the pglinter extension with: CREATE EXTENSION pglinter\",".to_string(), + " \"pglinter/ruleDisabledInExtension\": \"Enable the rule in the extension with: UPDATE pglinter.rules SET enable = true WHERE code = ''\",".to_string(), + ]; + + // Add rule categories + let mut current_category = String::new(); + for (category, entry) in &pglinter_rules { + if category != &current_category { + current_category = category.clone(); + all_entries.push(format!( + " // {} rules ({}-series)", + Case::Pascal.convert(category), + match category.as_str() { + "base" => "B", + "schema" => "S", + "cluster" => "C", + _ => "?", + } + )); + } + all_entries.push(entry.clone()); + } + + let pglinter_entries = all_entries.join("\n"); + + // Replace content between pglinter rules markers + let
rules_start = "// pglinter rules start"; + let rules_end = "// pglinter rules end"; + + content = replace_between_markers( + &content, + rules_start, + rules_end, + &format!("\n{pglinter_entries}\n "), + )?; + + // Generate pglinter group entries + let mut categories: Vec<String> = pglinter_rules.iter().map(|(cat, _)| cat.clone()).collect(); + categories.sort(); + categories.dedup(); + + let mut group_entries = vec![" \"pglinter\",".to_string()]; + for category in categories { + group_entries.push(format!(" \"pglinter/{category}\",")); + } + let groups_content = group_entries.join("\n"); + + // Replace content between pglinter groups markers + let groups_start = "// Pglinter groups start"; + let groups_end = "// Pglinter groups end"; + + content = replace_between_markers( + &content, + groups_start, + groups_end, + &format!("\n{groups_content}\n "), + )?; + + fs2::write(categories_path, content)?; + + Ok(()) +} + +/// Replace content between two markers +fn replace_between_markers( + content: &str, + start_marker: &str, + end_marker: &str, + new_content: &str, +) -> Result<String> { + let start_pos = content + .find(start_marker) + .with_context(|| format!("Could not find '{start_marker}' marker"))?; + + let end_pos = content + .find(end_marker) + .with_context(|| format!("Could not find '{end_marker}' marker"))?; + + let mut result = String::new(); + result.push_str(&content[..start_pos + start_marker.len()]); + result.push_str(new_content); + result.push_str(&content[end_pos..]); + + Ok(result) +} diff --git a/xtask/codegen/src/lib.rs b/xtask/codegen/src/lib.rs index 3ed82ace1..268439e05 100644 --- a/xtask/codegen/src/lib.rs +++ b/xtask/codegen/src/lib.rs @@ -5,6 +5,7 @@ mod generate_bindings; mod generate_configuration; mod generate_crate; mod generate_new_analyser_rule; +mod generate_pglinter; mod generate_splinter; pub use self::generate_analyser::generate_analyser; @@ -12,6 +13,7 @@ pub use self::generate_bindings::generate_bindings; pub use
self::generate_configuration::{generate_rules_configuration, generate_tool_configuration}; pub use self::generate_crate::generate_crate; pub use self::generate_new_analyser_rule::generate_new_analyser_rule; +pub use self::generate_pglinter::generate_pglinter; pub use self::generate_splinter::generate_splinter; use bpaf::Bpaf; use generate_new_analyser_rule::Category; @@ -95,4 +97,7 @@ pub enum TaskCommand { /// Generate splinter categories from the SQL file #[bpaf(command)] Splinter, + /// Generate pglinter rules from pglinter_repo/sql/rules.sql + #[bpaf(command)] + Pglinter, } diff --git a/xtask/codegen/src/main.rs b/xtask/codegen/src/main.rs index 43d11b44c..ca425db00 100644 --- a/xtask/codegen/src/main.rs +++ b/xtask/codegen/src/main.rs @@ -3,7 +3,7 @@ use xtask::{project_root, pushd, Result}; use xtask_codegen::{ generate_analyser, generate_bindings, generate_crate, generate_new_analyser_rule, - generate_rules_configuration, generate_splinter, task_command, TaskCommand, + generate_pglinter, generate_rules_configuration, generate_splinter, task_command, TaskCommand, }; fn main() -> Result<()> { @@ -34,6 +34,9 @@ fn main() -> Result<()> { TaskCommand::Splinter => { generate_splinter()?; } + TaskCommand::Pglinter => { + generate_pglinter()?; + } } Ok(()) From 42ef73b23b049c0988ce164aed360a4346f59f6d Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 15:36:17 +0100 Subject: [PATCH 02/10] fix: Dockerfile --- .github/actions/setup-postgres/action.yml | 39 +++++++++++++++++------ .github/workflows/pull_request.yml | 17 ++++++++-- Dockerfile | 27 ++++++++++------ 3 files changed, 61 insertions(+), 22 deletions(-) diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index 8615c84a3..effd17c67 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -53,7 +53,7 @@ runs: echo "Extension library files:" ls -la "$(pg_config --pkglibdir)/" | grep plpgsql || echo "No 
plpgsql_check library found" - # Install the pglinter extension on macOS + # Install the pglinter extension on macOS (pgrx-based Rust extension) - name: Install and compile pglinter if: runner.os == 'macOS' shell: bash @@ -61,16 +61,22 @@ runs: # First, ensure we're using the same PostgreSQL that the action installed export PATH="$(pg_config --bindir):$PATH" + # Install cargo-pgrx (version must match pglinter's pgrx dependency) + cargo install cargo-pgrx --version 0.16.1 --locked + + # Determine postgres version for pgrx init + PG_VERSION=$(pg_config --version | grep -oE '[0-9]+' | head -1) + echo "PostgreSQL version: $PG_VERSION" + + # Initialize pgrx for the installed PostgreSQL version + cargo pgrx init --pg${PG_VERSION} $(which pg_config) + # Clone and build pglinter git clone https://github.com/pmpetit/pglinter.git cd pglinter - # Clean and compile - make USE_PGXS=1 clean - make USE_PGXS=1 all - - # Install (may need sudo depending on permissions) - sudo make USE_PGXS=1 install + # Install using pgrx + cargo pgrx install --pg-config $(which pg_config) --release # Verify installation echo "Extension control files:" @@ -93,17 +99,30 @@ runs: psql -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" # For Linux, use custom Docker image with plpgsql_check and pglinter - - name: Build and start PostgreSQL with extensions + - name: Set up Docker Buildx + if: runner.os == 'Linux' + uses: docker/setup-buildx-action@v3 + + - name: Build PostgreSQL image with cache + if: runner.os == 'Linux' + uses: docker/build-push-action@v5 + with: + context: . + load: true + tags: postgres-language-server-dev:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Start PostgreSQL container if: runner.os == 'Linux' shell: bash run: | - docker build -t postgres-plpgsql-check:latest . 
docker run -d --name postgres \ -e POSTGRES_USER=postgres \ -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_DB=postgres \ -p 5432:5432 \ - postgres-plpgsql-check:latest + postgres-language-server-dev:latest # Wait for postgres to be ready for _ in {1..30}; do if docker exec postgres pg_isready -U postgres; then diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 45d4f9879..e74a551cc 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -96,15 +96,26 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # we need to use the same database as we do locally for sqlx prepare to output the same hashes - - name: Build and start PostgreSQL with plpgsql_check + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build PostgreSQL image with cache + uses: docker/build-push-action@v5 + with: + context: . + load: true + tags: postgres-language-server-dev:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Start PostgreSQL run: | - docker build -t postgres-plpgsql-check:latest . 
docker run -d --name postgres \ -e POSTGRES_USER=postgres \ -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_DB=postgres \ -p 5432:5432 \ - postgres-plpgsql-check:latest + postgres-language-server-dev:latest # Wait for postgres to be ready for _ in {1..30}; do if docker exec postgres pg_isready -U postgres; then diff --git a/Dockerfile b/Dockerfile index 61cda3a26..3d7e5894a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,24 +1,33 @@ FROM postgres:15 -# Install build dependencies and extensions +# Install build dependencies RUN apt-get update && \ - apt-get install -y postgresql-server-dev-15 gcc make git && \ - # Install plpgsql_check + apt-get install -y postgresql-server-dev-15 gcc make git curl pkg-config libssl-dev libclang-dev clang && \ + # Install plpgsql_check (C extension - simple make install) cd /tmp && \ git clone https://github.com/okbob/plpgsql_check.git && \ cd plpgsql_check && \ make && \ make install && \ - # Install pglinter + cd /tmp && \ + rm -rf /tmp/plpgsql_check && \ + # Install Rust for pglinter (pgrx-based extension) + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && \ + . 
$HOME/.cargo/env && \ + # Install cargo-pgrx (version must match pglinter's pgrx dependency) + cargo install cargo-pgrx --version 0.16.1 --locked && \ + # Initialize pgrx for PostgreSQL 15 + cargo pgrx init --pg15 $(which pg_config) && \ + # Clone and build pglinter cd /tmp && \ git clone https://github.com/pmpetit/pglinter.git && \ cd pglinter && \ - make && \ - make install && \ - # Cleanup - apt-get remove -y postgresql-server-dev-15 gcc make git && \ + cargo pgrx install --pg-config $(which pg_config) --release && \ + # Cleanup Rust and build dependencies + rm -rf /tmp/pglinter $HOME/.cargo $HOME/.rustup && \ + apt-get remove -y gcc make git curl pkg-config libssl-dev libclang-dev clang && \ apt-get autoremove -y && \ - rm -rf /tmp/plpgsql_check /tmp/pglinter /var/lib/apt/lists/* + rm -rf /var/lib/apt/lists/* # Add initialization script for extensions RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ From f851be7c65bd9bbe86fc5ef7a92465a37a71c0d8 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 15:53:29 +0100 Subject: [PATCH 03/10] fix: clone extensions to /tmp on macOS to avoid workspace conflicts --- .github/actions/setup-postgres/action.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index effd17c67..f50b65f75 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -35,7 +35,8 @@ runs: echo "Extension directory: $(pg_config --sharedir)/extension" echo "Library directory: $(pg_config --pkglibdir)" - # Clone and build plpgsql_check + # Clone and build plpgsql_check (clone to /tmp to avoid workspace conflicts) + cd /tmp git clone https://github.com/okbob/plpgsql_check.git cd plpgsql_check @@ -71,7 +72,8 @@ runs: # Initialize pgrx for the installed PostgreSQL version cargo pgrx init --pg${PG_VERSION} $(which pg_config) - # 
Clone and build pglinter + # Clone and build pglinter (clone to /tmp to avoid workspace conflicts) + cd /tmp git clone https://github.com/pmpetit/pglinter.git cd pglinter From 5f88f8e1fbcdea8773a1167b6cd66d391c86fcf4 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 16:05:42 +0100 Subject: [PATCH 04/10] fix: explicitly create extensions after container start --- .github/actions/setup-postgres/action.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index f50b65f75..e0afb0462 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -133,3 +133,14 @@ runs: sleep 1 done + # Verify extensions are created, create if missing + echo "Verifying extensions..." + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS pglinter;" + + # Show extension status + docker exec postgres psql -U postgres -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" + + # Verify pglinter schema exists + docker exec postgres psql -U postgres -c "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'pglinter';" + From c332de465e2f453e248f9c8ae78f15ff6bfde92d Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 16:18:26 +0100 Subject: [PATCH 05/10] fix: create extensions in template1 for SQLx test databases --- .github/actions/setup-postgres/action.yml | 9 +++++++-- Dockerfile | 8 +++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index e0afb0462..5182d9726 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -133,8 +133,13 @@ runs: sleep 1 done - # Verify extensions are created, create if missing 
- echo "Verifying extensions..." + # Verify extensions are created in template1 (for SQLx test databases) + echo "Creating extensions in template1..." + docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" + docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS pglinter;" + + # Also create in postgres database + echo "Creating extensions in postgres database..." docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS pglinter;" diff --git a/Dockerfile b/Dockerfile index 3d7e5894a..ae6b7fb9f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,5 +30,11 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* # Add initialization script for extensions -RUN echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ +# Create extensions in template1 so they're available in all new databases (for SQLx tests) +# Also create in postgres database for direct connections +RUN echo "\\c template1" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS pglinter;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "\\c postgres" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ echo "CREATE EXTENSION IF NOT EXISTS pglinter;" >> /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file From 7082abe15de35e82feb5cb2dad83054521190d4d Mon Sep 17 00:00:00 2001 From: psteinroe Date: Mon, 29 Dec 2025 16:27:37 +0100 Subject: [PATCH 06/10] fix: install extensions in 'extensions' schema to avoid lint warnings --- 
.github/actions/setup-postgres/action.yml | 11 +++++++---- Dockerfile | 13 ++++++++----- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index 5182d9726..3f0780ce7 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -134,14 +134,17 @@ runs: done # Verify extensions are created in template1 (for SQLx test databases) + # Use 'extensions' schema to avoid triggering extensionInPublic lint echo "Creating extensions in template1..." - docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" - docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS pglinter;" + docker exec postgres psql -U postgres -d template1 -c "CREATE SCHEMA IF NOT EXISTS extensions;" + docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" + docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" # Also create in postgres database echo "Creating extensions in postgres database..." 
- docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" - docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS pglinter;" + docker exec postgres psql -U postgres -c "CREATE SCHEMA IF NOT EXISTS extensions;" + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" + docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" # Show extension status docker exec postgres psql -U postgres -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" diff --git a/Dockerfile b/Dockerfile index ae6b7fb9f..9ae41ddec 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,11 +30,14 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* # Add initialization script for extensions -# Create extensions in template1 so they're available in all new databases (for SQLx tests) +# Create extensions in a dedicated 'extensions' schema to avoid triggering extensionInPublic lint +# Create in template1 so they're available in all new databases (for SQLx tests) # Also create in postgres database for direct connections RUN echo "\\c template1" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS pglinter;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE SCHEMA IF NOT EXISTS extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ echo "\\c postgres" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS 
plpgsql_check;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS pglinter;" >> /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file + echo "CREATE SCHEMA IF NOT EXISTS extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ + echo "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file From e995a00e77801fd91ba533f0b4ee501b0f8d23e0 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 30 Dec 2025 08:58:07 +0100 Subject: [PATCH 07/10] fix: ci --- .github/actions/setup-postgres/action.yml | 13 +- Dockerfile | 18 +- ...test_helper__completes_quoted_columns.snap | 1 + ...completes_quoted_columns_with_aliases.snap | 1 + ...oes_not_complete_cols_in_join_clauses.snap | 1 + ...__test_helper__handles_nested_queries.snap | 1 + ...t_helper__ignores_cols_in_from_clause.snap | 1 + ...__prefers_columns_of_mentioned_tables.snap | 1 + ...helper__prefers_not_mentioned_columns.snap | 1 + ...iple_columns_if_no_relation_specified.snap | 1 + ...columns_in_alter_table_and_drop_table.snap | 1 + ...er__suggests_columns_in_insert_clause.snap | 1 + ...per__suggests_columns_in_where_clause.snap | 1 + ..._suggests_columns_policy_using_clause.snap | 1 + ...ests_relevant_columns_without_letters.snap | 1 + crates/pgls_pglinter/src/cache.rs | 10 +- crates/pgls_pglinter/src/diagnostics.rs | 90 +++------ crates/pgls_pglinter/src/lib.rs | 32 +--- crates/pgls_pglinter/src/sarif.rs | 172 ------------------ crates/pgls_pglinter/tests/diagnostics.rs | 99 +++++++--- .../tests/snapshots/fk_without_index.snap | 13 ++ .../tests/snapshots/multiple_issues.snap | 35 ++++ .../snapshots/objects_with_uppercase.snap | 13 ++ .../snapshots/table_with_primary_key.snap | 13 ++ 
.../snapshots/table_without_primary_key.snap | 13 ++ 25 files changed, 221 insertions(+), 313 deletions(-) delete mode 100644 crates/pgls_pglinter/src/sarif.rs create mode 100644 crates/pgls_pglinter/tests/snapshots/fk_without_index.snap create mode 100644 crates/pgls_pglinter/tests/snapshots/multiple_issues.snap create mode 100644 crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap create mode 100644 crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap create mode 100644 crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml index 3f0780ce7..64d2a919f 100644 --- a/.github/actions/setup-postgres/action.yml +++ b/.github/actions/setup-postgres/action.yml @@ -133,14 +133,8 @@ runs: sleep 1 done - # Verify extensions are created in template1 (for SQLx test databases) - # Use 'extensions' schema to avoid triggering extensionInPublic lint - echo "Creating extensions in template1..." - docker exec postgres psql -U postgres -d template1 -c "CREATE SCHEMA IF NOT EXISTS extensions;" - docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" - docker exec postgres psql -U postgres -d template1 -c "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" - - # Also create in postgres database + # Create extensions in postgres database only (NOT template1) + # This avoids polluting test databases - tests that need extensions can create them explicitly echo "Creating extensions in postgres database..." 
docker exec postgres psql -U postgres -c "CREATE SCHEMA IF NOT EXISTS extensions;" docker exec postgres psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" @@ -149,6 +143,3 @@ runs: # Show extension status docker exec postgres psql -U postgres -c "SELECT extname, extversion FROM pg_extension WHERE extname IN ('plpgsql_check', 'pglinter');" - # Verify pglinter schema exists - docker exec postgres psql -U postgres -c "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'pglinter';" - diff --git a/Dockerfile b/Dockerfile index 9ae41ddec..577e61843 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,14 +30,10 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* # Add initialization script for extensions -# Create extensions in a dedicated 'extensions' schema to avoid triggering extensionInPublic lint -# Create in template1 so they're available in all new databases (for SQLx tests) -# Also create in postgres database for direct connections -RUN echo "\\c template1" > /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE SCHEMA IF NOT EXISTS extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "\\c postgres" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE SCHEMA IF NOT EXISTS extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql && \ - echo "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" >> /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file +# Only create in postgres database (NOT template1) to avoid 
polluting test databases +# Tests that need extensions can create them explicitly +RUN printf '%s\n' \ + "CREATE SCHEMA IF NOT EXISTS extensions;" \ + "CREATE EXTENSION IF NOT EXISTS plpgsql_check SCHEMA extensions;" \ + "CREATE EXTENSION IF NOT EXISTS pglinter SCHEMA extensions;" \ + > /docker-entrypoint-initdb.d/01-create-extension.sql \ No newline at end of file diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap index 8ef51386a..fb155719f 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap index a21fe79fe..c6f434c5c 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__completes_quoted_columns_with_aliases.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap index 629e98bc6..b43206c35 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap 
+++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__does_not_complete_cols_in_join_clauses.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap index 0d203a4c1..4b2df051d 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__handles_nested_queries.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap index a6c1b9f57..34ab698ec 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__ignores_cols_in_from_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap index ca024d628..cd43623d7 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_columns_of_mentioned_tables.snap @@ -1,6 +1,7 @@ --- source: 
crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap index 9ccf98a9a..bbf3b1858 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__prefers_not_mentioned_columns.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap index 463bca8f4..e466f559a 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__shows_multiple_columns_if_no_relation_specified.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap index 76dc39d7e..3d938ecd1 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_alter_table_and_drop_table.snap @@ -1,6 +1,7 @@ --- source: 
crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap index 0d300f984..6ac18aaed 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_insert_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap index a291aad9e..5fc56a8eb 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_in_where_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap index d3db86856..430fcaf36 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_columns_policy_using_clause.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** 
diff --git a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap index 758932c93..59224dd38 100644 --- a/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap +++ b/crates/pgls_completions/src/snapshots/pgls_completions__test_helper__suggests_relevant_columns_without_letters.snap @@ -1,6 +1,7 @@ --- source: crates/pgls_completions/src/test_helper.rs expression: final_snapshot +snapshot_kind: text --- ***Setup*** diff --git a/crates/pgls_pglinter/src/cache.rs b/crates/pgls_pglinter/src/cache.rs index 015fd73f2..f8739e4d5 100644 --- a/crates/pgls_pglinter/src/cache.rs +++ b/crates/pgls_pglinter/src/cache.rs @@ -43,12 +43,12 @@ impl PglinterCache { } } -/// Get disabled rules using pglinter's official API: pglinter.show_rules() +/// Get disabled rules by querying the pglinter.rules table +/// Uses the rules table directly since show_rules() only outputs to NOTICE pub async fn get_disabled_rules(conn: &PgPool) -> Result, sqlx::Error> { - let rows: Vec<(String, bool)> = - sqlx::query_as("SELECT rule_code, enabled FROM pglinter.show_rules()") - .fetch_all(conn) - .await?; + let rows: Vec<(String, bool)> = sqlx::query_as("SELECT code, enable FROM pglinter.rules") + .fetch_all(conn) + .await?; Ok(rows .into_iter() diff --git a/crates/pgls_pglinter/src/diagnostics.rs b/crates/pgls_pglinter/src/diagnostics.rs index ad19c81fc..eb75026c6 100644 --- a/crates/pgls_pglinter/src/diagnostics.rs +++ b/crates/pgls_pglinter/src/diagnostics.rs @@ -1,4 +1,4 @@ -//! Pglinter diagnostic types and conversion from SARIF +//! 
Pglinter diagnostic types use pgls_diagnostics::{ Advices, Category, DatabaseObjectOwned, Diagnostic, LogCategory, MessageAndDescription, @@ -6,8 +6,6 @@ use pgls_diagnostics::{ }; use std::io; -use crate::sarif; - /// A specialized diagnostic for pglinter (database-level linting via pglinter extension). #[derive(Debug, Diagnostic, PartialEq)] pub struct PglinterDiagnostic { @@ -75,72 +73,7 @@ impl Advices for PglinterAdvices { } } -/// Error when converting SARIF to diagnostics -#[derive(Debug)] -pub struct UnknownRuleError { - pub rule_code: String, -} - -impl std::fmt::Display for UnknownRuleError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Unknown pglinter rule code: {}", self.rule_code) - } -} - -impl std::error::Error for UnknownRuleError {} - impl PglinterDiagnostic { - /// Try to convert a single SARIF result to a pglinter diagnostic - pub fn try_from_sarif( - result: &sarif::Result, - rule_code: &str, - ) -> Result { - let category = - crate::registry::get_rule_category(rule_code).ok_or_else(|| UnknownRuleError { - rule_code: rule_code.to_string(), - })?; - - let metadata = crate::registry::get_rule_metadata_by_code(rule_code); - - let severity = match result.level_str() { - "error" => Severity::Error, - "warning" => Severity::Warning, - "note" => Severity::Information, - _ => Severity::Warning, - }; - - let message = result.message_text().to_string(); - let description = metadata - .map(|m| m.description.to_string()) - .unwrap_or_else(|| message.clone()); - - let fixes = metadata - .map(|m| m.fixes.iter().map(|s| s.to_string()).collect()) - .unwrap_or_default(); - - let object_list = { - let names = result.logical_location_names(); - if names.is_empty() { - None - } else { - Some(names.join("\n")) - } - }; - - Ok(PglinterDiagnostic { - category, - db_object: None, - message: message.into(), - severity, - advices: PglinterAdvices { - description, - rule_code: Some(rule_code.to_string()), - fixes, - object_list, - 
}, - }) - } - /// Create diagnostic for missing pglinter extension pub fn extension_not_installed() -> PglinterDiagnostic { PglinterDiagnostic { @@ -180,4 +113,25 @@ impl PglinterDiagnostic { }, } } + + /// Create diagnostic from rule code using known metadata + pub fn from_rule_code(rule_code: &str) -> Option { + let category = crate::registry::get_rule_category(rule_code)?; + let metadata = crate::registry::get_rule_metadata_by_code(rule_code)?; + + let fixes: Vec = metadata.fixes.iter().map(|s| s.to_string()).collect(); + + Some(PglinterDiagnostic { + category, + db_object: None, + message: metadata.description.into(), + severity: Severity::Warning, + advices: PglinterAdvices { + description: metadata.description.to_string(), + rule_code: Some(rule_code.to_string()), + fixes, + object_list: None, + }, + }) + } } diff --git a/crates/pgls_pglinter/src/lib.rs b/crates/pgls_pglinter/src/lib.rs index 8506a70e2..870a29072 100644 --- a/crates/pgls_pglinter/src/lib.rs +++ b/crates/pgls_pglinter/src/lib.rs @@ -5,7 +5,6 @@ mod diagnostics; pub mod registry; pub mod rule; pub mod rules; -pub mod sarif; use pgls_analyse::{AnalysisFilter, RegistryVisitor, RuleMeta}; use pgls_schema_cache::SchemaCache; @@ -14,7 +13,6 @@ use sqlx::PgPool; pub use cache::PglinterCache; pub use diagnostics::{PglinterAdvices, PglinterDiagnostic}; pub use rule::PglinterRule; -pub use sarif::SarifLog; /// Parameters for running pglinter #[derive(Debug)] @@ -121,37 +119,25 @@ pub async fn run_pglinter( Ok(results) } -/// Execute a single pglinter rule using pglinter.check_rule() +/// Execute a single pglinter rule using pglinter.check(rule_code) +/// Returns true if the rule detected issues async fn execute_rule( conn: &PgPool, rule_code: &str, ) -> Result>, sqlx::Error> { - let result: Option = sqlx::query_scalar("SELECT pglinter.check_rule($1)") + let has_issues: bool = sqlx::query_scalar("SELECT pglinter.check($1)") .bind(rule_code) - .fetch_optional(conn) + .fetch_one(conn) .await?; - let 
Some(sarif_json) = result else { - return Ok(None); - }; - - let sarif = match SarifLog::parse(&sarif_json) { - Ok(s) => s, - Err(_) => return Ok(None), - }; - - if !sarif.has_results() { + if !has_issues { return Ok(None); } - let diags: Vec<_> = sarif - .all_results() - .filter_map(|result| PglinterDiagnostic::try_from_sarif(result, rule_code).ok()) - .collect(); - - if diags.is_empty() { - Ok(None) + // Rule fired - create diagnostic from our known metadata + if let Some(diag) = PglinterDiagnostic::from_rule_code(rule_code) { + Ok(Some(vec![diag])) } else { - Ok(Some(diags)) + Ok(None) } } diff --git a/crates/pgls_pglinter/src/sarif.rs b/crates/pgls_pglinter/src/sarif.rs deleted file mode 100644 index d57d0979f..000000000 --- a/crates/pgls_pglinter/src/sarif.rs +++ /dev/null @@ -1,172 +0,0 @@ -//! Generic SARIF (Static Analysis Results Interchange Format) parser -//! -//! SARIF is a standard format for static analysis tool output. -//! See: https://sarifweb.azurewebsites.net/ - -use serde::Deserialize; - -/// SARIF 2.1.0 root object -#[derive(Debug, Deserialize)] -pub struct SarifLog { - #[serde(default)] - pub runs: Vec, -} - -/// A single run of a static analysis tool -#[derive(Debug, Deserialize)] -pub struct Run { - #[serde(default)] - pub results: Vec, - pub tool: Option, -} - -/// Information about the tool that produced the results -#[derive(Debug, Deserialize)] -pub struct Tool { - pub driver: Option, -} - -/// The tool driver (main component) -#[derive(Debug, Deserialize)] -pub struct Driver { - pub name: Option, - pub version: Option, -} - -/// A single result from the analysis -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Result { - /// The rule ID that was violated - pub rule_id: Option, - /// Severity level: "error", "warning", "note", "none" - pub level: Option, - /// The result message - pub message: Option, - /// Locations where the issue was found - #[serde(default)] - pub locations: Vec, -} - -/// A message with 
text content -#[derive(Debug, Deserialize)] -pub struct Message { - pub text: Option, -} - -/// A location in the source -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Location { - pub physical_location: Option, - pub logical_locations: Option>, -} - -/// A physical location (file, line, column) -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct PhysicalLocation { - pub artifact_location: Option, - pub region: Option, -} - -/// Location of an artifact (file) -#[derive(Debug, Deserialize)] -pub struct ArtifactLocation { - pub uri: Option, -} - -/// A region within a file -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Region { - pub start_line: Option, - pub start_column: Option, - pub end_line: Option, - pub end_column: Option, -} - -/// A logical location (schema, table, function name, etc.) -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct LogicalLocation { - pub name: Option, - pub fully_qualified_name: Option, - pub kind: Option, -} - -impl SarifLog { - /// Parse SARIF JSON into a structured log - pub fn parse(json: &str) -> std::result::Result { - serde_json::from_str(json) - } - - /// Get all results from all runs - pub fn all_results(&self) -> impl Iterator { - self.runs.iter().flat_map(|run| run.results.iter()) - } - - /// Check if there are any results - pub fn has_results(&self) -> bool { - self.runs.iter().any(|run| !run.results.is_empty()) - } -} - -impl Result { - /// Get the severity level, defaulting to "warning" - pub fn level_str(&self) -> &str { - self.level.as_deref().unwrap_or("warning") - } - - /// Get the message text, defaulting to empty string - pub fn message_text(&self) -> &str { - self.message - .as_ref() - .and_then(|m| m.text.as_deref()) - .unwrap_or("") - } - - /// Get logical location names (e.g., affected database objects) - pub fn logical_location_names(&self) -> Vec<&str> { - self.locations - .iter() - 
.filter_map(|loc| loc.logical_locations.as_ref()) - .flatten() - .filter_map(|ll| ll.fully_qualified_name.as_deref().or(ll.name.as_deref())) - .collect() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_minimal_sarif() { - let json = r#"{ - "runs": [{ - "results": [{ - "ruleId": "B001", - "level": "warning", - "message": { "text": "Table without primary key" } - }] - }] - }"#; - - let log = SarifLog::parse(json).unwrap(); - assert!(log.has_results()); - - let results: Vec<_> = log.all_results().collect(); - assert_eq!(results.len(), 1); - assert_eq!(results[0].rule_id.as_deref(), Some("B001")); - assert_eq!(results[0].level_str(), "warning"); - assert_eq!(results[0].message_text(), "Table without primary key"); - } - - #[test] - fn test_parse_empty_sarif() { - let json = r#"{"runs": [{"results": []}]}"#; - let log = SarifLog::parse(json).unwrap(); - assert!(!log.has_results()); - } -} diff --git a/crates/pgls_pglinter/tests/diagnostics.rs b/crates/pgls_pglinter/tests/diagnostics.rs index abdd68bcd..6d37418c2 100644 --- a/crates/pgls_pglinter/tests/diagnostics.rs +++ b/crates/pgls_pglinter/tests/diagnostics.rs @@ -1,6 +1,6 @@ //! Integration tests for pglinter diagnostics //! -//! These tests require the pglinter extension to be installed in the test database. +//! These tests configure pglinter thresholds to 0% so rules fire deterministically. 
use pgls_analyse::AnalysisFilter; use pgls_console::fmt::{Formatter, HTML}; @@ -49,42 +49,63 @@ impl Visit for TestVisitor { } } +/// Configure pglinter for deterministic testing: +/// - Set all thresholds to 0% warning, 1% error so any violation triggers +/// - Disable cluster-level rules that depend on pg_hba.conf +async fn configure_pglinter_for_tests(pool: &PgPool) { + // Set thresholds to 0% warning for deterministic behavior + let rules_to_configure = [ + "B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010", "B011", + "B012", "S001", "S002", "S003", "S004", "S005", + ]; + + for rule in rules_to_configure { + let _ = sqlx::query("SELECT pglinter.update_rule_levels($1, 0, 1)") + .bind(rule) + .execute(pool) + .await; + } + + // Disable cluster-level rules (depend on pg_hba.conf, not deterministic) + for rule in ["C001", "C002", "C003"] { + let _ = sqlx::query("SELECT pglinter.disable_rule($1)") + .bind(rule) + .execute(pool) + .await; + } +} + struct TestSetup<'a> { name: &'a str, setup: &'a str, test_db: &'a PgPool, + /// Only include rules matching these prefixes (e.g., ["B001", "B005"]) + /// Empty means include all non-cluster rules + rule_filter: Vec<&'a str>, } impl TestSetup<'_> { async fn test(self) { - // Load schema cache - let schema_cache = SchemaCache::load(self.test_db) + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") + .execute(self.test_db) .await - .expect("Failed to load schema cache"); + .expect("pglinter extension not available"); - // Assert pglinter extension is installed - assert!( - schema_cache.extensions.iter().any(|e| e.name == "pglinter"), - "pglinter extension must be installed for tests to run" - ); + configure_pglinter_for_tests(self.test_db).await; - // Run setup SQL sqlx::raw_sql(self.setup) .execute(self.test_db) .await .expect("Failed to setup test database"); - // Reload schema cache after setup let schema_cache = SchemaCache::load(self.test_db) .await - .expect("Failed to reload schema 
cache"); + .expect("Failed to load schema cache"); - // Load pglinter cache let cache = PglinterCache::load(self.test_db, &schema_cache) .await .expect("Failed to load pglinter cache"); - // Run pglinter checks with all rules enabled let filter = AnalysisFilter::default(); let diagnostics = run_pglinter( PglinterParams { @@ -97,26 +118,49 @@ impl TestSetup<'_> { .await .expect("Failed to run pglinter checks"); - let content = if diagnostics.is_empty() { + // Filter diagnostics + let filtered: Vec<_> = diagnostics + .iter() + .filter(|d| { + let category = d.category().map(|c| c.name()).unwrap_or(""); + // Exclude cluster-level rules + if category.contains("/cluster/") { + return false; + } + // Apply rule filter if specified + if !self.rule_filter.is_empty() { + let rule_code = d + .advices + .rule_code + .as_ref() + .map(|s| s.as_str()) + .unwrap_or(""); + return self.rule_filter.iter().any(|f| rule_code == *f); + } + true + }) + .collect(); + + // Sort by category for deterministic output + let mut sorted = filtered; + sorted.sort_by_key(|d| d.category().map(|c| c.name()).unwrap_or("unknown")); + + let content = if sorted.is_empty() { String::from("No Diagnostics") } else { let mut result = String::new(); - for (idx, diagnostic) in diagnostics.iter().enumerate() { + for (idx, diagnostic) in sorted.iter().enumerate() { if idx > 0 { writeln!(&mut result).unwrap(); writeln!(&mut result, "---").unwrap(); writeln!(&mut result).unwrap(); } - // Write category let category_name = diagnostic.category().map(|c| c.name()).unwrap_or("unknown"); writeln!(&mut result, "Category: {category_name}").unwrap(); - - // Write severity writeln!(&mut result, "Severity: {:?}", diagnostic.severity()).unwrap(); - // Write message let mut msg_content = vec![]; let mut writer = HTML::new(&mut msg_content); let mut formatter = Formatter::new(&mut writer); @@ -128,7 +172,6 @@ impl TestSetup<'_> { ) .unwrap(); - // Write advices using custom visitor let mut visitor = TestVisitor::new(); 
diagnostic.advices(&mut visitor).unwrap(); let advice_text = visitor.into_string(); @@ -148,16 +191,21 @@ impl TestSetup<'_> { } } -/// Test that checks extension availability +/// Test that pglinter extension can be created #[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] async fn extension_check(test_db: PgPool) { + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") + .execute(&test_db) + .await + .expect("pglinter extension not available"); + let schema_cache = SchemaCache::load(&test_db) .await .expect("Failed to load schema cache"); assert!( schema_cache.extensions.iter().any(|e| e.name == "pglinter"), - "pglinter extension must be installed for tests to run" + "pglinter extension not found" ); } @@ -173,6 +221,7 @@ async fn table_without_primary_key(test_db: PgPool) { ); "#, test_db: &test_db, + rule_filter: vec!["B001"], } .test() .await; @@ -190,6 +239,7 @@ async fn table_with_primary_key(test_db: PgPool) { ); "#, test_db: &test_db, + rule_filter: vec!["B001"], } .test() .await; @@ -207,6 +257,7 @@ async fn objects_with_uppercase(test_db: PgPool) { ); "#, test_db: &test_db, + rule_filter: vec!["B005"], } .test() .await; @@ -229,6 +280,7 @@ async fn fk_without_index(test_db: PgPool) { ); "#, test_db: &test_db, + rule_filter: vec!["B003"], } .test() .await; @@ -261,6 +313,7 @@ async fn multiple_issues(test_db: PgPool) { ); "#, test_db: &test_db, + rule_filter: vec!["B001", "B003", "B005"], } .test() .await; diff --git a/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap new file mode 100644 index 000000000..5f79a0dea --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyTableWithoutIndexOnFk +Severity: Warning +Message: Count number of tables without index on foreign key. 
+Advices: +Count number of tables without index on foreign key. +[Info] Rule: B003 +How to fix: +[Info] 1. create a index on foreign key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap new file mode 100644 index 000000000..15ed63c69 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap @@ -0,0 +1,35 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Count number of objects with uppercase in name or in columns. +Advices: +Count number of objects with uppercase in name or in columns. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyTableWithoutIndexOnFk +Severity: Warning +Message: Count number of tables without index on foreign key. +Advices: +Count number of tables without index on foreign key. +[Info] Rule: B003 +How to fix: +[Info] 1. create a index on foreign key or change warning/error threshold + +--- + +Category: pglinter/base/howManyTableWithoutPrimaryKey +Severity: Warning +Message: Count number of tables without primary key. +Advices: +Count number of tables without primary key. +[Info] Rule: B001 +How to fix: +[Info] 1. 
create a primary key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap new file mode 100644 index 000000000..3fbd674a6 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Count number of objects with uppercase in name or in columns. +Advices: +Count number of objects with uppercase in name or in columns. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects diff --git a/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap b/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap new file mode 100644 index 000000000..b6611adb5 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyTableWithoutPrimaryKey +Severity: Warning +Message: Count number of tables without primary key. +Advices: +Count number of tables without primary key. +[Info] Rule: B001 +How to fix: +[Info] 1. create a primary key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap new file mode 100644 index 000000000..b6611adb5 --- /dev/null +++ b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap @@ -0,0 +1,13 @@ +--- +source: crates/pgls_pglinter/tests/diagnostics.rs +expression: content +snapshot_kind: text +--- +Category: pglinter/base/howManyTableWithoutPrimaryKey +Severity: Warning +Message: Count number of tables without primary key. 
+Advices: +Count number of tables without primary key. +[Info] Rule: B001 +How to fix: +[Info] 1. create a primary key or change warning/error threshold From a2e542e172ebcc4bbee30c430e208cca07793933 Mon Sep 17 00:00:00 2001 From: psteinroe Date: Tue, 30 Dec 2025 09:17:53 +0100 Subject: [PATCH 08/10] fix: skip pglinter tests on Windows and fix clippy warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add #![cfg(not(target_os = "windows"))] to skip pglinter tests on Windows since the pglinter extension is not available there (only Linux/macOS) - Fix clippy warnings: use as_deref() and contains() instead of manual patterns - Remove table_with_primary_key test since pglinter checks all tables globally, making a "no diagnostics for table with PK" test impossible when other tables exist - Add plpgsql_check as dependency in test setup 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- Cargo.toml | 1 + crates/pgls_pglinter/tests/diagnostics.rs | 46 +++++++++---------- .../snapshots/table_with_primary_key.snap | 13 ------ 3 files changed, 22 insertions(+), 38 deletions(-) delete mode 100644 crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap diff --git a/Cargo.toml b/Cargo.toml index 32c3c8773..f41110e5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ pgls_lexer = { path = "./crates/pgls_lexer", version = "0.0.0" pgls_lexer_codegen = { path = "./crates/pgls_lexer_codegen", version = "0.0.0" } pgls_lsp = { path = "./crates/pgls_lsp", version = "0.0.0" } pgls_markup = { path = "./crates/pgls_markup", version = "0.0.0" } +pgls_matcher = { path = "./crates/pgls_matcher", version = "0.0.0" } pgls_pglinter = { path = "./crates/pgls_pglinter", version = "0.0.0" } pgls_plpgsql_check = { path = "./crates/pgls_plpgsql_check", version = "0.0.0" } pgls_query = { path = "./crates/pgls_query", version = "0.0.0" } diff --git a/crates/pgls_pglinter/tests/diagnostics.rs 
b/crates/pgls_pglinter/tests/diagnostics.rs index 6d37418c2..971678b8a 100644 --- a/crates/pgls_pglinter/tests/diagnostics.rs +++ b/crates/pgls_pglinter/tests/diagnostics.rs @@ -1,6 +1,11 @@ //! Integration tests for pglinter diagnostics //! //! These tests configure pglinter thresholds to 0% so rules fire deterministically. +//! +//! Note: These tests require the pglinter extension to be installed, which is only +//! available on Linux (via Docker) and macOS. Windows CI does not have pglinter. + +#![cfg(not(target_os = "windows"))] use pgls_analyse::AnalysisFilter; use pgls_console::fmt::{Formatter, HTML}; @@ -86,6 +91,12 @@ struct TestSetup<'a> { impl TestSetup<'_> { async fn test(self) { + // Create required extensions (pglinter may depend on plpgsql_check) + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS plpgsql_check") + .execute(self.test_db) + .await + .expect("plpgsql_check extension not available"); + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") .execute(self.test_db) .await @@ -129,13 +140,8 @@ impl TestSetup<'_> { } // Apply rule filter if specified if !self.rule_filter.is_empty() { - let rule_code = d - .advices - .rule_code - .as_ref() - .map(|s| s.as_str()) - .unwrap_or(""); - return self.rule_filter.iter().any(|f| rule_code == *f); + let rule_code = d.advices.rule_code.as_deref().unwrap_or(""); + return self.rule_filter.contains(&rule_code); } true }) @@ -194,6 +200,12 @@ impl TestSetup<'_> { /// Test that pglinter extension can be created #[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] async fn extension_check(test_db: PgPool) { + // Create required extensions (pglinter may depend on plpgsql_check) + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS plpgsql_check") + .execute(&test_db) + .await + .expect("plpgsql_check extension not available"); + sqlx::raw_sql("CREATE EXTENSION IF NOT EXISTS pglinter") .execute(&test_db) .await @@ -210,6 +222,8 @@ async fn extension_check(test_db: PgPool) { } /// Test B001: Table without primary key 
+/// Note: pglinter checks ALL tables in the database globally, not just specific tables. +/// So this test verifies that B001 fires when any table lacks a primary key. #[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] async fn table_without_primary_key(test_db: PgPool) { TestSetup { @@ -227,24 +241,6 @@ async fn table_without_primary_key(test_db: PgPool) { .await; } -/// Test with a clean table (has primary key) -#[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] -async fn table_with_primary_key(test_db: PgPool) { - TestSetup { - name: "table_with_primary_key", - setup: r#" - CREATE TABLE public.test_with_pk ( - id serial PRIMARY KEY, - name text - ); - "#, - test_db: &test_db, - rule_filter: vec!["B001"], - } - .test() - .await; -} - /// Test B005: Objects with uppercase names #[sqlx::test(migrator = "pgls_test_utils::MIGRATIONS")] async fn objects_with_uppercase(test_db: PgPool) { diff --git a/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap b/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap deleted file mode 100644 index b6611adb5..000000000 --- a/crates/pgls_pglinter/tests/snapshots/table_with_primary_key.snap +++ /dev/null @@ -1,13 +0,0 @@ ---- -source: crates/pgls_pglinter/tests/diagnostics.rs -expression: content -snapshot_kind: text ---- -Category: pglinter/base/howManyTableWithoutPrimaryKey -Severity: Warning -Message: Count number of tables without primary key. -Advices: -Count number of tables without primary key. -[Info] Rule: B001 -How to fix: -[Info] 1. 
create a primary key or change warning/error threshold From 994da1b7b36200ea5f4211b3da5e4d2e012a357a Mon Sep 17 00:00:00 2001 From: psteinroe Date: Sun, 11 Jan 2026 16:12:14 +0100 Subject: [PATCH 09/10] chore: update lockfile --- Cargo.lock | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1fc0daaff..f8091f5f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2752,6 +2752,7 @@ dependencies = [ "pgls_console", "pgls_diagnostics", "pgls_env", + "pgls_matcher", "pgls_text_size", "rustc-hash 2.1.0", "schemars", @@ -2921,6 +2922,15 @@ dependencies = [ "quote", ] +[[package]] +name = "pgls_matcher" +version = "0.0.0" +dependencies = [ + "pgls_console", + "pgls_diagnostics", + "rustc-hash 2.1.0", +] + [[package]] name = "pgls_pglinter" version = "0.0.0" @@ -3017,8 +3027,10 @@ version = "0.0.0" dependencies = [ "insta", "pgls_analyse", + "pgls_configuration", "pgls_console", "pgls_diagnostics", + "pgls_matcher", "pgls_schema_cache", "pgls_test_utils", "serde", @@ -3177,6 +3189,7 @@ dependencies = [ "pgls_fs", "pgls_hover", "pgls_lexer", + "pgls_matcher", "pgls_plpgsql_check", "pgls_query", "pgls_query_ext", From 903bdbd9c49f07c15612175b33dca550fd41aaac Mon Sep 17 00:00:00 2001 From: psteinroe Date: Wed, 14 Jan 2026 10:30:28 +0100 Subject: [PATCH 10/10] progress --- ...def7cbde2c9e609d34c1bfcb87f50ed0f25ff.json | 32 ++++ ...c98d80b50a02c30a54b30611ca40b99d51ab7.json | 38 +++++ Dockerfile | 4 +- crates/pgls_pglinter/src/diagnostics.rs | 114 +++++++++++++ crates/pgls_pglinter/src/lib.rs | 156 +++++++++++++++--- .../tests/snapshots/fk_without_index.snap | 4 +- .../tests/snapshots/multiple_issues.snap | 100 ++++++++++- .../snapshots/objects_with_uppercase.snap | 59 ++++++- .../snapshots/table_without_primary_key.snap | 4 +- crates/pgls_schema_cache/src/indexes.rs | 21 +++ crates/pgls_schema_cache/src/lib.rs | 4 + .../pgls_schema_cache/src/queries/indexes.sql | 11 ++ .../src/queries/sequences.sql | 8 + 
crates/pgls_schema_cache/src/schema_cache.rs | 30 ++++ crates/pgls_schema_cache/src/sequences.rs | 20 +++ 15 files changed, 568 insertions(+), 37 deletions(-) create mode 100644 .sqlx/query-0aba89d3e0ed2e4586b94ea8fe2def7cbde2c9e609d34c1bfcb87f50ed0f25ff.json create mode 100644 .sqlx/query-4f7d0241b0c52b2d6742b441e9ac98d80b50a02c30a54b30611ca40b99d51ab7.json create mode 100644 crates/pgls_schema_cache/src/indexes.rs create mode 100644 crates/pgls_schema_cache/src/queries/indexes.sql create mode 100644 crates/pgls_schema_cache/src/queries/sequences.sql create mode 100644 crates/pgls_schema_cache/src/sequences.rs diff --git a/.sqlx/query-0aba89d3e0ed2e4586b94ea8fe2def7cbde2c9e609d34c1bfcb87f50ed0f25ff.json b/.sqlx/query-0aba89d3e0ed2e4586b94ea8fe2def7cbde2c9e609d34c1bfcb87f50ed0f25ff.json new file mode 100644 index 000000000..18290122d --- /dev/null +++ b/.sqlx/query-0aba89d3e0ed2e4586b94ea8fe2def7cbde2c9e609d34c1bfcb87f50ed0f25ff.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n c.oid::bigint as \"id!\",\n n.nspname as \"schema!\",\n c.relname as \"name!\"\nFROM pg_catalog.pg_class c\nJOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\nWHERE c.relkind = 'S'\n AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')\n", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "schema!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name!", + "type_info": "Name" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + false, + false + ] + }, + "hash": "0aba89d3e0ed2e4586b94ea8fe2def7cbde2c9e609d34c1bfcb87f50ed0f25ff" +} diff --git a/.sqlx/query-4f7d0241b0c52b2d6742b441e9ac98d80b50a02c30a54b30611ca40b99d51ab7.json b/.sqlx/query-4f7d0241b0c52b2d6742b441e9ac98d80b50a02c30a54b30611ca40b99d51ab7.json new file mode 100644 index 000000000..daa11c2c1 --- /dev/null +++ 
b/.sqlx/query-4f7d0241b0c52b2d6742b441e9ac98d80b50a02c30a54b30611ca40b99d51ab7.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n c.oid::bigint as \"id!\",\n n.nspname as \"schema!\",\n c.relname as \"name!\",\n t.relname as \"table_name!\"\nFROM pg_catalog.pg_class c\nJOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\nJOIN pg_catalog.pg_index i ON i.indexrelid = c.oid\nJOIN pg_catalog.pg_class t ON t.oid = i.indrelid\nWHERE c.relkind = 'i'\n AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')\n", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "schema!", + "type_info": "Name" + }, + { + "ordinal": 2, + "name": "name!", + "type_info": "Name" + }, + { + "ordinal": 3, + "name": "table_name!", + "type_info": "Name" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null, + false, + false, + false + ] + }, + "hash": "4f7d0241b0c52b2d6742b441e9ac98d80b50a02c30a54b30611ca40b99d51ab7" +} diff --git a/Dockerfile b/Dockerfile index 577e61843..f5cf837e5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,9 +18,9 @@ RUN apt-get update && \ cargo install cargo-pgrx --version 0.16.1 --locked && \ # Initialize pgrx for PostgreSQL 15 cargo pgrx init --pg15 $(which pg_config) && \ - # Clone and build pglinter + # Clone and build pglinter (using feat/83/violation_list branch for get_violations API) cd /tmp && \ - git clone https://github.com/pmpetit/pglinter.git && \ + git clone -b feat/83/violation_list https://github.com/pmpetit/pglinter.git && \ cd pglinter && \ cargo pgrx install --pg-config $(which pg_config) --release && \ # Cleanup Rust and build dependencies diff --git a/crates/pgls_pglinter/src/diagnostics.rs b/crates/pgls_pglinter/src/diagnostics.rs index eb75026c6..47753951f 100644 --- a/crates/pgls_pglinter/src/diagnostics.rs +++ b/crates/pgls_pglinter/src/diagnostics.rs @@ -134,4 +134,118 @@ impl PglinterDiagnostic { }, }) } + + /// Create 
diagnostic from a pglinter violation with optional object info + pub fn from_violation( + rule_code: &str, + db_object: Option, + ) -> Option { + let category = crate::registry::get_rule_category(rule_code)?; + let metadata = crate::registry::get_rule_metadata_by_code(rule_code)?; + + let fixes: Vec = metadata.fixes.iter().map(|s| s.to_string()).collect(); + + // Generate a violation-specific message + let message = violation_message(rule_code, db_object.as_ref()); + + // Generate a violation-specific advice (more detailed explanation) + let advice_description = violation_advice(rule_code); + + Some(PglinterDiagnostic { + category, + db_object, + message: message.into(), + severity: Severity::Warning, + advices: PglinterAdvices { + description: advice_description, + rule_code: Some(rule_code.to_string()), + fixes, + object_list: None, + }, + }) + } +} + +/// Generate a user-friendly violation message for a specific object +fn violation_message(rule_code: &str, db_object: Option<&DatabaseObjectOwned>) -> String { + let obj_name = db_object + .map(|obj| { + if let Some(ref schema) = obj.schema { + format!("'{}.{}'", schema, obj.name) + } else { + format!("'{}'", obj.name) + } + }) + .unwrap_or_else(|| "Object".to_string()); + + let obj_type = db_object + .and_then(|obj| obj.object_type.as_deref()) + .unwrap_or("object"); + + match rule_code { + // Base rules + "B001" => format!("Table {obj_name} has no primary key"), + "B002" => format!("Index on {obj_name} is redundant"), + "B003" => format!("Foreign key on {obj_name} has no index"), + "B004" => format!("Index on {obj_name} is unused"), + "B005" => format!( + "{} {} uses uppercase characters", + capitalize(obj_type), + obj_name + ), + "B006" => format!("Table {obj_name} is never selected from"), + "B007" => format!("Foreign key on {obj_name} references table outside its schema"), + "B008" => format!("Foreign key on {obj_name} has type mismatch"), + "B009" => format!("Table {obj_name} has duplicate trigger"), + 
"B010" => format!( + "{} {} uses reserved SQL keyword", + capitalize(obj_type), + obj_name + ), + "B011" => format!("Tables in {obj_name} have different owners"), + "B012" => format!("Table {obj_name} has composite primary key with too many columns"), + // Schema rules + "S001" => format!("Schema {obj_name} has no default role granted"), + "S002" => format!("Schema {obj_name} name is prefixed/suffixed with environment"), + "S003" => format!("Schema {obj_name} has insecure public access"), + "S004" => format!("Schema {obj_name} owner is an internal role"), + "S005" => format!("Schema {obj_name} owner doesn't match table owners"), + // Cluster rules + "C001" | "C002" | "C003" => "Cluster configuration issue".to_string(), + // Fallback + _ => format!("{} {} has a violation", capitalize(obj_type), obj_name), + } +} + +fn capitalize(s: &str) -> String { + let mut c = s.chars(); + match c.next() { + None => String::new(), + Some(f) => f.to_uppercase().collect::() + c.as_str(), + } +} + +/// Generate detailed advice for a specific rule violation +fn violation_advice(rule_code: &str) -> String { + match rule_code { + "B001" => "Tables without primary keys cannot be uniquely identified, which causes issues with replication, foreign keys, and efficient updates/deletes.".to_string(), + "B002" => "Redundant indexes waste storage space and slow down write operations without providing query benefits.".to_string(), + "B003" => "Foreign keys without indexes cause slow cascading operations and inefficient join queries.".to_string(), + "B004" => "Unused indexes consume storage and slow down writes without benefiting any queries.".to_string(), + "B005" => "Using uppercase in identifiers requires quoting and can cause case-sensitivity issues.".to_string(), + "B006" => "Tables never queried may be obsolete and candidates for removal.".to_string(), + "B007" => "Cross-schema foreign keys can cause issues with schema-level operations and access control.".to_string(), + "B008" => "Type 
mismatches in foreign keys can cause implicit casts, affecting performance and data integrity.".to_string(), + "B009" => "Duplicate triggers may cause unexpected behavior or redundant processing.".to_string(), + "B010" => "Using SQL reserved keywords as identifiers requires quoting and may cause compatibility issues.".to_string(), + "B011" => "Mixed ownership in schemas can cause permission and maintenance issues.".to_string(), + "B012" => "Large composite primary keys are inefficient for indexing and foreign key references.".to_string(), + "S001" => "Schemas without default role grants may have inconsistent permission patterns.".to_string(), + "S002" => "Environment prefixes/suffixes in schema names indicate environment-specific configuration that should be handled differently.".to_string(), + "S003" => "Insecure public access to schemas can expose data to unauthorized users.".to_string(), + "S004" => "Internal role ownership of schemas can cause maintenance and security issues.".to_string(), + "S005" => "Mismatched schema/table ownership can cause permission inconsistencies.".to_string(), + "C001" | "C002" | "C003" => "Cluster configuration issues may affect database stability and performance.".to_string(), + _ => String::new(), + } } diff --git a/crates/pgls_pglinter/src/lib.rs b/crates/pgls_pglinter/src/lib.rs index 870a29072..95b3e0542 100644 --- a/crates/pgls_pglinter/src/lib.rs +++ b/crates/pgls_pglinter/src/lib.rs @@ -7,6 +7,7 @@ pub mod rule; pub mod rules; use pgls_analyse::{AnalysisFilter, RegistryVisitor, RuleMeta}; +use pgls_diagnostics::DatabaseObjectOwned; use pgls_schema_cache::SchemaCache; use sqlx::PgPool; @@ -14,6 +15,24 @@ pub use cache::PglinterCache; pub use diagnostics::{PglinterAdvices, PglinterDiagnostic}; pub use rule::PglinterRule; +/// PostgreSQL catalog OIDs for different object types +mod pg_catalog { + pub const PG_CLASS: i64 = 1259; // tables, views, indexes, sequences + pub const PG_PROC: i64 = 1255; // functions, procedures + pub 
const PG_TYPE: i64 = 1247; // types + pub const PG_NAMESPACE: i64 = 2615; // schemas + pub const PG_ATTRIBUTE: i64 = 1249; // columns (objid=table oid, objsubid=column number) +} + +/// A violation row returned by pglinter.get_violations() +#[derive(Debug, sqlx::FromRow)] +struct ViolationRow { + rule_code: String, + classid: i64, + objid: i64, + objsubid: i32, +} + /// Parameters for running pglinter #[derive(Debug)] pub struct PglinterParams<'a> { @@ -109,35 +128,126 @@ pub async fn run_pglinter( return Ok(results); } - // Execute each rule - for rule_code in &runnable_rules { - if let Some(diags) = execute_rule(params.conn, rule_code).await? { - results.extend(diags); + // Fetch all violations in one query + let violations = fetch_violations(params.conn).await?; + + // Process violations, filtering by enabled rules and resolving objects from cache + for violation in violations { + // Skip violations for rules we're not checking + if !runnable_rules.contains(&violation.rule_code) { + continue; + } + + // Resolve the object from the schema cache + let db_object = resolve_object_from_cache( + params.schema_cache, + violation.classid, + violation.objid, + violation.objsubid, + ); + + // Create a diagnostic for this violation + if let Some(diag) = PglinterDiagnostic::from_violation(&violation.rule_code, db_object) { + results.push(diag); } } Ok(results) } -/// Execute a single pglinter rule using pglinter.check(rule_code) -/// Returns true if the rule detected issues -async fn execute_rule( - conn: &PgPool, - rule_code: &str, -) -> Result<Option<Vec<PglinterDiagnostic>>, sqlx::Error> { - let has_issues: bool = sqlx::query_scalar("SELECT pglinter.check($1)") - .bind(rule_code) - .fetch_one(conn) - .await?; - - if !has_issues { - return Ok(None); - } +/// Fetch all violations from pglinter.get_violations() +async fn fetch_violations(conn: &PgPool) -> Result<Vec<ViolationRow>, sqlx::Error> { + sqlx::query_as::<_, ViolationRow>( + "select rule_code, classid::bigint, objid::bigint, objsubid from 
pglinter.get_violations()", + ) + .fetch_all(conn) + .await +} - // Rule fired - create diagnostic from our known metadata - if let Some(diag) = PglinterDiagnostic::from_rule_code(rule_code) { - Ok(Some(vec![diag])) - } else { - Ok(None) +/// Resolve a Postgres object from the schema cache using its catalog OIDs +fn resolve_object_from_cache( + schema_cache: &SchemaCache, + classid: i64, + objid: i64, + objsubid: i32, +) -> Option { + match classid { + pg_catalog::PG_CLASS => { + // pg_class contains tables, views, indexes, sequences, etc. + // Try tables first, then indexes, then sequences + schema_cache + .find_table_by_id(objid) + .map(|t| DatabaseObjectOwned { + schema: Some(t.schema.clone()), + name: t.name.clone(), + object_type: Some(format!("{:?}", t.table_kind).to_lowercase()), + }) + .or_else(|| { + schema_cache + .find_index_by_id(objid) + .map(|i| DatabaseObjectOwned { + schema: Some(i.schema.clone()), + name: i.name.clone(), + object_type: Some("index".to_string()), + }) + }) + .or_else(|| { + schema_cache + .find_sequence_by_id(objid) + .map(|s| DatabaseObjectOwned { + schema: Some(s.schema.clone()), + name: s.name.clone(), + object_type: Some("sequence".to_string()), + }) + }) + } + pg_catalog::PG_PROC => { + // Functions and procedures + schema_cache + .find_function_by_id(objid) + .map(|f| DatabaseObjectOwned { + schema: Some(f.schema.clone()), + name: f.name.clone(), + object_type: Some(format!("{:?}", f.kind).to_lowercase()), + }) + } + pg_catalog::PG_TYPE => { + // Types + schema_cache + .find_type_by_id(objid) + .map(|t| DatabaseObjectOwned { + schema: Some(t.schema.clone()), + name: t.name.clone(), + object_type: Some("type".to_string()), + }) + } + pg_catalog::PG_NAMESPACE => { + // Schemas + schema_cache + .find_schema_by_id(objid) + .map(|s| DatabaseObjectOwned { + schema: None, + name: s.name.clone(), + object_type: Some("schema".to_string()), + }) + } + pg_catalog::PG_ATTRIBUTE => { + // Columns: objid is table OID, objsubid is column 
number (attnum) + // Find the column by table OID and column number + let col_num = i64::from(objsubid); + schema_cache + .columns + .iter() + .find(|c| c.table_oid == objid && c.number == col_num) + .map(|c| DatabaseObjectOwned { + schema: Some(c.schema_name.clone()), + name: format!("{}.{}", c.table_name, c.name), + object_type: Some("column".to_string()), + }) + } + _ => { + // Unknown catalog - we can't resolve this object from the cache + None + } } } diff --git a/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap index 5f79a0dea..ea6afe546 100644 --- a/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap +++ b/crates/pgls_pglinter/tests/snapshots/fk_without_index.snap @@ -5,9 +5,9 @@ snapshot_kind: text --- Category: pglinter/base/howManyTableWithoutIndexOnFk Severity: Warning -Message: Count number of tables without index on foreign key. +Message: Foreign key on Object has no index Advices: -Count number of tables without index on foreign key. +Foreign keys without indexes cause slow cascading operations and inefficient join queries. [Info] Rule: B003 How to fix: [Info] 1. create a index on foreign key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap index 15ed63c69..2c803dff6 100644 --- a/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap +++ b/crates/pgls_pglinter/tests/snapshots/multiple_issues.snap @@ -5,9 +5,97 @@ snapshot_kind: text --- Category: pglinter/base/howManyObjectsWithUppercase Severity: Warning -Message: Count number of objects with uppercase in name or in columns. +Message: Sequence 'public.BadName_id_seq' uses uppercase characters Advices: -Count number of objects with uppercase in name or in columns. +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. 
Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Ordinary 'public.BadName' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Index 'public.BadName_pkey' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Index 'public.BadName_pkey' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Sequence 'public.BadName_id_seq' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Object Object uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Object Object uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. 
Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Object Object uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Object Object uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. [Info] Rule: B005 How to fix: [Info] 1. Do not use uppercase for any database objects @@ -16,9 +104,9 @@ How to fix: Category: pglinter/base/howManyTableWithoutIndexOnFk Severity: Warning -Message: Count number of tables without index on foreign key. +Message: Foreign key on Object has no index Advices: -Count number of tables without index on foreign key. +Foreign keys without indexes cause slow cascading operations and inefficient join queries. [Info] Rule: B003 How to fix: [Info] 1. create a index on foreign key or change warning/error threshold @@ -27,9 +115,9 @@ How to fix: Category: pglinter/base/howManyTableWithoutPrimaryKey Severity: Warning -Message: Count number of tables without primary key. +Message: Table 'public.no_pk' has no primary key Advices: -Count number of tables without primary key. +Tables without primary keys cannot be uniquely identified, which causes issues with replication, foreign keys, and efficient updates/deletes. [Info] Rule: B001 How to fix: [Info] 1. 
create a primary key or change warning/error threshold diff --git a/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap index 3fbd674a6..68d4d88cd 100644 --- a/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap +++ b/crates/pgls_pglinter/tests/snapshots/objects_with_uppercase.snap @@ -5,9 +5,64 @@ snapshot_kind: text --- Category: pglinter/base/howManyObjectsWithUppercase Severity: Warning -Message: Count number of objects with uppercase in name or in columns. +Message: Sequence 'public.TestTable_id_seq' uses uppercase characters Advices: -Count number of objects with uppercase in name or in columns. +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Ordinary 'public.TestTable' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Index 'public.TestTable_pkey' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Column 'public.TestTable.UserName' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. 
Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Index 'public.TestTable_pkey' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. +[Info] Rule: B005 +How to fix: +[Info] 1. Do not use uppercase for any database objects + +--- + +Category: pglinter/base/howManyObjectsWithUppercase +Severity: Warning +Message: Sequence 'public.TestTable_id_seq' uses uppercase characters +Advices: +Using uppercase in identifiers requires quoting and can cause case-sensitivity issues. [Info] Rule: B005 How to fix: [Info] 1. Do not use uppercase for any database objects diff --git a/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap index b6611adb5..5de8cc4e0 100644 --- a/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap +++ b/crates/pgls_pglinter/tests/snapshots/table_without_primary_key.snap @@ -5,9 +5,9 @@ snapshot_kind: text --- Category: pglinter/base/howManyTableWithoutPrimaryKey Severity: Warning -Message: Count number of tables without primary key. +Message: Table 'public.test_no_pk' has no primary key Advices: -Count number of tables without primary key. +Tables without primary keys cannot be uniquely identified, which causes issues with replication, foreign keys, and efficient updates/deletes. [Info] Rule: B001 How to fix: [Info] 1. 
create a primary key or change warning/error threshold diff --git a/crates/pgls_schema_cache/src/indexes.rs b/crates/pgls_schema_cache/src/indexes.rs new file mode 100644 index 000000000..fee160f5e --- /dev/null +++ b/crates/pgls_schema_cache/src/indexes.rs @@ -0,0 +1,21 @@ +use sqlx::PgPool; + +use crate::schema_cache::SchemaCacheItem; + +#[derive(Debug, Default, PartialEq, Eq)] +pub struct Index { + pub id: i64, + pub schema: String, + pub name: String, + pub table_name: String, +} + +impl SchemaCacheItem for Index { + type Item = Index; + + async fn load(pool: &PgPool) -> Result<Vec<Index>, sqlx::Error> { + sqlx::query_file_as!(Index, "src/queries/indexes.sql") + .fetch_all(pool) + .await + } +} diff --git a/crates/pgls_schema_cache/src/lib.rs b/crates/pgls_schema_cache/src/lib.rs index 6440cd01a..7cdfc7808 100644 --- a/crates/pgls_schema_cache/src/lib.rs +++ b/crates/pgls_schema_cache/src/lib.rs @@ -5,10 +5,12 @@ mod columns; mod extensions; mod functions; +mod indexes; mod policies; mod roles; mod schema_cache; mod schemas; +mod sequences; mod tables; mod triggers; mod types; @@ -17,10 +19,12 @@ mod versions; pub use columns::*; pub use extensions::Extension; pub use functions::{Behavior, Function, FunctionArg, FunctionArgs, ProcKind}; +pub use indexes::Index; pub use policies::{Policy, PolicyCommand}; pub use roles::*; pub use schema_cache::SchemaCache; pub use schemas::Schema; +pub use sequences::Sequence; pub use tables::{ReplicaIdentity, Table, TableKind}; pub use triggers::{Trigger, TriggerAffected, TriggerEvent}; pub use types::{PostgresType, PostgresTypeAttribute}; diff --git a/crates/pgls_schema_cache/src/queries/indexes.sql b/crates/pgls_schema_cache/src/queries/indexes.sql new file mode 100644 index 000000000..27e472017 --- /dev/null +++ b/crates/pgls_schema_cache/src/queries/indexes.sql @@ -0,0 +1,11 @@ +SELECT + c.oid::bigint as "id!", + n.nspname as "schema!", + c.relname as "name!", + t.relname as "table_name!" 
+FROM pg_catalog.pg_class c +JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid +JOIN pg_catalog.pg_class t ON t.oid = i.indrelid +WHERE c.relkind = 'i' + AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') diff --git a/crates/pgls_schema_cache/src/queries/sequences.sql b/crates/pgls_schema_cache/src/queries/sequences.sql new file mode 100644 index 000000000..dc1d64a29 --- /dev/null +++ b/crates/pgls_schema_cache/src/queries/sequences.sql @@ -0,0 +1,8 @@ +SELECT + c.oid::bigint as "id!", + n.nspname as "schema!", + c.relname as "name!" +FROM pg_catalog.pg_class c +JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relkind = 'S' + AND n.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast') diff --git a/crates/pgls_schema_cache/src/schema_cache.rs b/crates/pgls_schema_cache/src/schema_cache.rs index d02fcc6cb..313d3656f 100644 --- a/crates/pgls_schema_cache/src/schema_cache.rs +++ b/crates/pgls_schema_cache/src/schema_cache.rs @@ -2,8 +2,10 @@ use sqlx::postgres::PgPool; use crate::columns::Column; use crate::functions::Function; +use crate::indexes::Index; use crate::policies::Policy; use crate::schemas::Schema; +use crate::sequences::Sequence; use crate::tables::Table; use crate::types::PostgresType; use crate::versions::Version; @@ -21,6 +23,8 @@ pub struct SchemaCache { pub extensions: Vec<Extension>, pub triggers: Vec<Trigger>, pub roles: Vec<Role>, + pub indexes: Vec<Index>, + pub sequences: Vec<Sequence>, } impl SchemaCache { @@ -36,6 +40,8 @@ impl SchemaCache { triggers, roles, extensions, + indexes, + sequences, ) = futures_util::try_join!( Schema::load(pool), Table::load(pool), @@ -47,6 +53,8 @@ Trigger::load(pool), Role::load(pool), Extension::load(pool), + Index::load(pool), + Sequence::load(pool), )?; let version = versions @@ -65,6 +73,8 @@ triggers, roles, extensions, + indexes, + sequences, }) } @@ -102,6 +112,26 @@ impl SchemaCache { 
self.types.iter().find(|t| t.id == id) } + pub fn find_table_by_id(&self, id: i64) -> Option<&Table> { + self.tables.iter().find(|t| t.id == id) + } + + pub fn find_function_by_id(&self, id: i64) -> Option<&Function> { + self.functions.iter().find(|f| f.id == id) + } + + pub fn find_schema_by_id(&self, id: i64) -> Option<&Schema> { + self.schemas.iter().find(|s| s.id == id) + } + + pub fn find_index_by_id(&self, id: i64) -> Option<&Index> { + self.indexes.iter().find(|i| i.id == id) + } + + pub fn find_sequence_by_id(&self, id: i64) -> Option<&Sequence> { + self.sequences.iter().find(|s| s.id == id) + } + pub fn find_cols(&self, name: &str, table: Option<&str>, schema: Option<&str>) -> Vec<&Column> { let sanitized_name = Self::sanitize_identifier(name); self.columns diff --git a/crates/pgls_schema_cache/src/sequences.rs b/crates/pgls_schema_cache/src/sequences.rs new file mode 100644 index 000000000..61146d6be --- /dev/null +++ b/crates/pgls_schema_cache/src/sequences.rs @@ -0,0 +1,20 @@ +use sqlx::PgPool; + +use crate::schema_cache::SchemaCacheItem; + +#[derive(Debug, Default, PartialEq, Eq)] +pub struct Sequence { + pub id: i64, + pub schema: String, + pub name: String, +} + +impl SchemaCacheItem for Sequence { + type Item = Sequence; + + async fn load(pool: &PgPool) -> Result<Vec<Sequence>, sqlx::Error> { + sqlx::query_file_as!(Sequence, "src/queries/sequences.sql") + .fetch_all(pool) + .await + } +}