diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py deleted file mode 100644 index be6a8c5378..0000000000 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ /dev/null @@ -1,1391 +0,0 @@ -# Copyright 2022 Canonical Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""PostgreSQL helper class. - -The `postgresql` module provides methods for interacting with the PostgreSQL instance. - -Any charm using this library should import the `psycopg2` or `psycopg2-binary` dependency. 
-""" - -import logging -from collections import OrderedDict -from typing import Dict, List, Optional, Set, Tuple - -import psycopg2 -from ops.model import Relation -from psycopg2.sql import SQL, Composed, Identifier, Literal - -# The unique Charmhub library identifier, never change it -LIBID = "24ee217a54e840a598ff21a079c3e678" - -# Increment this major API version when introducing breaking changes -LIBAPI = 0 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 52 - -# Groups to distinguish HBA access -ACCESS_GROUP_IDENTITY = "identity_access" -ACCESS_GROUP_INTERNAL = "internal_access" -ACCESS_GROUP_RELATION = "relation_access" - -# List of access groups to filter role assignments by -ACCESS_GROUPS = [ - ACCESS_GROUP_IDENTITY, - ACCESS_GROUP_INTERNAL, - ACCESS_GROUP_RELATION, -] - -# Groups to distinguish database permissions -PERMISSIONS_GROUP_ADMIN = "admin" - -INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles" - -REQUIRED_PLUGINS = { - "address_standardizer": ["postgis"], - "address_standardizer_data_us": ["postgis"], - "jsonb_plperl": ["plperl"], - "postgis_raster": ["postgis"], - "postgis_tiger_geocoder": ["postgis", "fuzzystrmatch"], - "postgis_topology": ["postgis"], -} -DEPENDENCY_PLUGINS = set() -for dependencies in REQUIRED_PLUGINS.values(): - DEPENDENCY_PLUGINS |= set(dependencies) - -logger = logging.getLogger(__name__) - - -class PostgreSQLAssignGroupError(Exception): - """Exception raised when assigning to a group fails.""" - - -class PostgreSQLCreateDatabaseError(Exception): - """Exception raised when creating a database fails.""" - - -class PostgreSQLCreateGroupError(Exception): - """Exception raised when creating a group fails.""" - - -class PostgreSQLCreateUserError(Exception): - """Exception raised when creating a user fails.""" - - def __init__(self, message: Optional[str] = None): - super().__init__(message) - self.message = 
message - - -class PostgreSQLDatabasesSetupError(Exception): - """Exception raised when the databases setup fails.""" - - -class PostgreSQLDeleteUserError(Exception): - """Exception raised when deleting a user fails.""" - - -class PostgreSQLEnableDisableExtensionError(Exception): - """Exception raised when enabling/disabling an extension fails.""" - - -class PostgreSQLGetLastArchivedWALError(Exception): - """Exception raised when retrieving last archived WAL fails.""" - - -class PostgreSQLGetCurrentTimelineError(Exception): - """Exception raised when retrieving current timeline id for the PostgreSQL unit fails.""" - - -class PostgreSQLGetPostgreSQLVersionError(Exception): - """Exception raised when retrieving PostgreSQL version fails.""" - - -class PostgreSQLListAccessibleDatabasesForUserError(Exception): - """Exception raised when retrieving the accessible databases for a user fails.""" - - -class PostgreSQLListGroupsError(Exception): - """Exception raised when retrieving PostgreSQL groups list fails.""" - - -class PostgreSQLListUsersError(Exception): - """Exception raised when retrieving PostgreSQL users list fails.""" - - -class PostgreSQLUpdateUserPasswordError(Exception): - """Exception raised when updating a user password fails.""" - - -class PostgreSQLDatabaseExistsError(Exception): - """Exception raised during database existence check.""" - - -class PostgreSQLTableExistsError(Exception): - """Exception raised during table existence check.""" - - -class PostgreSQLIsTableEmptyError(Exception): - """Exception raised during table emptiness check.""" - - -class PostgreSQLCreatePublicationError(Exception): - """Exception raised when creating PostgreSQL publication.""" - - -class PostgreSQLPublicationExistsError(Exception): - """Exception raised during PostgreSQL publication existence check.""" - - -class PostgreSQLAlterPublicationError(Exception): - """Exception raised when altering PostgreSQL publication.""" - - -class PostgreSQLDropPublicationError(Exception): 
- """Exception raised when dropping PostgreSQL publication.""" - - -class PostgreSQLCreateSubscriptionError(Exception): - """Exception raised when creating PostgreSQL subscription.""" - - -class PostgreSQLSubscriptionExistsError(Exception): - """Exception raised during PostgreSQL subscription existence check.""" - - -class PostgreSQLUpdateSubscriptionError(Exception): - """Exception raised when updating PostgreSQL subscription.""" - - -class PostgreSQLRefreshSubscriptionError(Exception): - """Exception raised when refreshing PostgreSQL subscription.""" - - -class PostgreSQLDropSubscriptionError(Exception): - """Exception raised when dropping PostgreSQL subscription.""" - - -class PostgreSQL: - """Class to encapsulate all operations related to interacting with PostgreSQL instance.""" - - def __init__( - self, - primary_host: str, - current_host: str, - user: str, - password: str, - database: str, - system_users: Optional[List[str]] = None, - ): - self.primary_host = primary_host - self.current_host = current_host - self.user = user - self.password = password - self.database = database - self.system_users = system_users if system_users else [] - - def _configure_pgaudit(self, enable: bool) -> None: - connection = None - try: - connection = self._connect_to_database() - connection.autocommit = True - with connection.cursor() as cursor: - if enable: - cursor.execute("ALTER SYSTEM SET pgaudit.log = 'ROLE,DDL,MISC,MISC_SET';") - cursor.execute("ALTER SYSTEM SET pgaudit.log_client TO off;") - cursor.execute("ALTER SYSTEM SET pgaudit.log_parameter TO off;") - else: - cursor.execute("ALTER SYSTEM RESET pgaudit.log;") - cursor.execute("ALTER SYSTEM RESET pgaudit.log_client;") - cursor.execute("ALTER SYSTEM RESET pgaudit.log_parameter;") - cursor.execute("SELECT pg_reload_conf();") - finally: - if connection is not None: - connection.close() - - def _connect_to_database( - self, database: Optional[str] = None, database_host: Optional[str] = None - ) -> 
psycopg2.extensions.connection: - """Creates a connection to the database. - - Args: - database: database to connect to (defaults to the database - provided when the object for this class was created). - database_host: host to connect to instead of the primary host. - - Returns: - psycopg2 connection object. - """ - host = database_host if database_host is not None else self.primary_host - connection = psycopg2.connect( - f"dbname='{database if database else self.database}' user='{self.user}' host='{host}'" - f"password='{self.password}' connect_timeout=1" - ) - connection.autocommit = True - return connection - - def create_access_groups(self) -> None: - """Create access groups to distinguish HBA authentication methods.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - for group in ACCESS_GROUPS: - cursor.execute( - SQL("SELECT TRUE FROM pg_roles WHERE rolname={};").format(Literal(group)) - ) - if cursor.fetchone() is not None: - continue - cursor.execute( - SQL("CREATE ROLE {} NOLOGIN;").format( - Identifier(group), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to create access groups: {e}") - raise PostgreSQLCreateGroupError() from e - finally: - if connection is not None: - connection.close() - - def create_database( - self, - database: str, - user: str, - plugins: Optional[List[str]] = None, - client_relations: Optional[List[Relation]] = None, - ) -> None: - """Creates a new database and grant privileges to a user on it. - - Args: - database: database to be created. - user: user that will have access to the database. - plugins: extensions to enable in the new database. - client_relations: current established client relations. 
- """ - plugins = plugins if plugins else [] - client_relations = client_relations if client_relations else [] - try: - connection = self._connect_to_database() - cursor = connection.cursor() - cursor.execute( - SQL("SELECT datname FROM pg_database WHERE datname={};").format(Literal(database)) - ) - if cursor.fetchone() is None: - cursor.execute(SQL("CREATE DATABASE {};").format(Identifier(database))) - cursor.execute( - SQL("REVOKE ALL PRIVILEGES ON DATABASE {} FROM PUBLIC;").format( - Identifier(database) - ) - ) - for user_to_grant_access in [user, PERMISSIONS_GROUP_ADMIN, *self.system_users]: - cursor.execute( - SQL("GRANT ALL PRIVILEGES ON DATABASE {} TO {};").format( - Identifier(database), Identifier(user_to_grant_access) - ) - ) - relations_accessing_this_database = 0 - for relation in client_relations: - for data in relation.data.values(): - if data.get("database") == database: - relations_accessing_this_database += 1 - with self._connect_to_database(database=database) as conn, conn.cursor() as curs: - curs.execute( - "SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT LIKE 'pg_%' and schema_name <> 'information_schema';" - ) - schemas = [row[0] for row in curs.fetchall()] - statements = self._generate_database_privileges_statements( - relations_accessing_this_database, schemas, user - ) - for statement in statements: - curs.execute(statement) - except psycopg2.Error as e: - logger.error(f"Failed to create database: {e}") - raise PostgreSQLCreateDatabaseError() from e - - # Enable preset extensions - self.enable_disable_extensions(dict.fromkeys(plugins, True), database) - - def create_user( - self, - user: str, - password: Optional[str] = None, - admin: bool = False, - replication: bool = False, - extra_user_roles: Optional[List[str]] = None, - ) -> None: - """Creates a database user. - - Args: - user: user to be created. - password: password to be assigned to the user. 
- admin: whether the user should have additional admin privileges. - replication: whether the user should have replication privileges. - extra_user_roles: additional privileges and/or roles to be assigned to the user. - """ - try: - # Separate roles and privileges from the provided extra user roles. - admin_role = False - roles = privileges = None - if extra_user_roles: - admin_role = PERMISSIONS_GROUP_ADMIN in extra_user_roles - valid_privileges, valid_roles = self.list_valid_privileges_and_roles() - roles = [ - role - for role in extra_user_roles - if role in valid_roles and role != PERMISSIONS_GROUP_ADMIN - ] - privileges = { - extra_user_role - for extra_user_role in extra_user_roles - if extra_user_role not in roles and extra_user_role != PERMISSIONS_GROUP_ADMIN - } - invalid_privileges = [ - privilege for privilege in privileges if privilege not in valid_privileges - ] - if len(invalid_privileges) > 0: - logger.error(f"Invalid extra user roles: {', '.join(privileges)}") - raise PostgreSQLCreateUserError(INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE) - - with self._connect_to_database() as connection, connection.cursor() as cursor: - # Create or update the user. - cursor.execute( - SQL("SELECT TRUE FROM pg_roles WHERE rolname={};").format(Literal(user)) - ) - if cursor.fetchone() is not None: - user_definition = "ALTER ROLE {}" - else: - user_definition = "CREATE ROLE {}" - user_definition += f"WITH {'NOLOGIN' if user == 'admin' else 'LOGIN'}{' SUPERUSER' if admin else ''}{' REPLICATION' if replication else ''} ENCRYPTED PASSWORD '{password}'{'IN ROLE admin CREATEDB' if admin_role else ''}" - if privileges: - user_definition += f" {' '.join(privileges)}" - cursor.execute(SQL("BEGIN;")) - cursor.execute(SQL("SET LOCAL log_statement = 'none';")) - cursor.execute(SQL(f"{user_definition};").format(Identifier(user))) - cursor.execute(SQL("COMMIT;")) - - # Add extra user roles to the new user. 
- if roles: - for role in roles: - cursor.execute( - SQL("GRANT {} TO {};").format(Identifier(role), Identifier(user)) - ) - except psycopg2.Error as e: - logger.error(f"Failed to create user: {e}") - raise PostgreSQLCreateUserError() from e - - def delete_user(self, user: str) -> None: - """Deletes a database user. - - Args: - user: user to be deleted. - """ - # First of all, check whether the user exists. Otherwise, do nothing. - users = self.list_users() - if user not in users: - return - - # List all databases. - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT datname FROM pg_database WHERE datistemplate = false;") - databases = [row[0] for row in cursor.fetchall()] - - # Existing objects need to be reassigned in each database - # before the user can be deleted. - for database in databases: - with self._connect_to_database( - database - ) as connection, connection.cursor() as cursor: - cursor.execute( - SQL("REASSIGN OWNED BY {} TO {};").format( - Identifier(user), Identifier(self.user) - ) - ) - cursor.execute(SQL("DROP OWNED BY {};").format(Identifier(user))) - - # Delete the user. 
- with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute(SQL("DROP ROLE {};").format(Identifier(user))) - except psycopg2.Error as e: - logger.error(f"Failed to delete user: {e}") - raise PostgreSQLDeleteUserError() from e - - def grant_internal_access_group_memberships(self) -> None: - """Grant membership to the internal access-group to existing internal users.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - for user in self.system_users: - cursor.execute( - SQL("GRANT {} TO {};").format( - Identifier(ACCESS_GROUP_INTERNAL), - Identifier(user), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to grant internal access group memberships: {e}") - raise PostgreSQLAssignGroupError() from e - finally: - if connection is not None: - connection.close() - - def grant_relation_access_group_memberships(self) -> None: - """Grant membership to the relation access-group to existing relation users.""" - rel_users = self.list_users_from_relation() - if not rel_users: - return - - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - rel_groups = SQL(",").join(Identifier(group) for group in [ACCESS_GROUP_RELATION]) - rel_users = SQL(",").join(Identifier(user) for user in rel_users) - - cursor.execute( - SQL("GRANT {groups} TO {users};").format( - groups=rel_groups, - users=rel_users, - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to grant relation access group memberships: {e}") - raise PostgreSQLAssignGroupError() from e - finally: - if connection is not None: - connection.close() - - def grant_replication_privileges( - self, - user: str, - database: str, - schematables: list[str], - old_schematables: list[str] | None = None, - ) -> None: - """Grant CONNECT privilege on database and SELECT privilege on tables. - - Args: - user: target user for privileges grant. 
- database: database to grant CONNECT privilege on. - schematables: list of tables with schema notation to grant SELECT privileges on. - old_schematables: list of tables with schema notation to revoke all privileges from. - """ - connection = None - try: - connection = self._connect_to_database(database=database) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("GRANT CONNECT ON DATABASE {} TO {};").format( - Identifier(database), Identifier(user) - ) - ) - if old_schematables: - cursor.execute( - SQL("REVOKE ALL PRIVILEGES ON TABLE {} FROM {};").format( - SQL(",").join( - Identifier(schematable.split(".")[0], schematable.split(".")[1]) - for schematable in old_schematables - ), - Identifier(user), - ) - ) - cursor.execute( - SQL("GRANT SELECT ON TABLE {} TO {};").format( - SQL(",").join( - Identifier(schematable.split(".")[0], schematable.split(".")[1]) - for schematable in schematables - ), - Identifier(user), - ) - ) - finally: - if connection: - connection.close() - - def revoke_replication_privileges( - self, user: str, database: str, schematables: list[str] - ) -> None: - """Revoke all privileges from tables and database. - - Args: - user: target user for privileges revocation. - database: database to remove all privileges from. - schematables: list of tables with schema notation to revoke all privileges from. 
- """ - connection = None - try: - connection = self._connect_to_database(database=database) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("REVOKE ALL PRIVILEGES ON TABLE {} FROM {};").format( - SQL(",").join( - Identifier(schematable.split(".")[0], schematable.split(".")[1]) - for schematable in schematables - ), - Identifier(user), - ) - ) - cursor.execute( - SQL("REVOKE ALL PRIVILEGES ON DATABASE {} FROM {};").format( - Identifier(database), Identifier(user) - ) - ) - finally: - if connection: - connection.close() - - def enable_disable_extensions( - self, extensions: Dict[str, bool], database: Optional[str] = None - ) -> None: - """Enables or disables a PostgreSQL extension. - - Args: - extensions: the name of the extensions. - database: optional database where to enable/disable the extension. - - Raises: - PostgreSQLEnableDisableExtensionError if the operation fails. - """ - connection = None - try: - if database is not None: - databases = [database] - else: - # Retrieve all the databases. - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT datname FROM pg_database WHERE NOT datistemplate;") - databases = {database[0] for database in cursor.fetchall()} - - ordered_extensions = OrderedDict() - for plugin in DEPENDENCY_PLUGINS: - ordered_extensions[plugin] = extensions.get(plugin, False) - for extension, enable in extensions.items(): - ordered_extensions[extension] = enable - - self._configure_pgaudit(False) - - # Enable/disabled the extension in each database. 
- for database in databases: - with self._connect_to_database( - database=database - ) as connection, connection.cursor() as cursor: - for extension, enable in ordered_extensions.items(): - cursor.execute( - f"CREATE EXTENSION IF NOT EXISTS {extension};" - if enable - else f"DROP EXTENSION IF EXISTS {extension};" - ) - self._configure_pgaudit(ordered_extensions.get("pgaudit", False)) - except psycopg2.errors.UniqueViolation: - pass - except psycopg2.errors.DependentObjectsStillExist: - raise - except psycopg2.Error as e: - raise PostgreSQLEnableDisableExtensionError() from e - finally: - if connection is not None: - connection.close() - - def _generate_database_privileges_statements( - self, relations_accessing_this_database: int, schemas: List[str], user: str - ) -> List[Composed]: - """Generates a list of databases privileges statements.""" - statements = [] - if relations_accessing_this_database == 1: - statements.append( - SQL( - """DO $$ -DECLARE r RECORD; -BEGIN - FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '."' || tablename ||'" OWNER TO {};' AS statement -FROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema') -UNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '."' || sequence_name ||'" OWNER TO {};' AS statement -FROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema') -UNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'f' -UNION SELECT 4 AS index,'ALTER PROCEDURE '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 
'information_schema') AND p.prokind = 'p' -UNION SELECT 5 AS index,'ALTER AGGREGATE '|| nsp.nspname || '."' || p.proname ||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO {};' AS statement -FROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'a' -UNION SELECT 6 AS index,'ALTER VIEW '|| schemaname || '."' || viewname ||'" OWNER TO {};' AS statement -FROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP - EXECUTE format(r.statement); - END LOOP; -END; $$;""" - ).format( - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - Identifier(user), - ) - ) - statements.append( - SQL( - "UPDATE pg_catalog.pg_largeobject_metadata\n" - "SET lomowner = (SELECT oid FROM pg_roles WHERE rolname = {})\n" - "WHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = {});" - ).format(Literal(user), Literal(self.user)) - ) - for schema in schemas: - statements.append( - SQL("ALTER SCHEMA {} OWNER TO {};").format( - Identifier(schema), Identifier(user) - ) - ) - else: - for schema in schemas: - schema = Identifier(schema) - statements.extend([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA {} TO {};").format( - schema, Identifier(user) - ), - SQL("GRANT USAGE ON SCHEMA {} TO {};").format(schema, Identifier(user)), - SQL("GRANT CREATE ON SCHEMA {} TO {};").format(schema, Identifier(user)), - ]) - return statements - - def get_last_archived_wal(self) -> str: - """Get the name of the last archived wal for the current PostgreSQL cluster.""" - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT 
last_archived_wal FROM pg_stat_archiver;") - return cursor.fetchone()[0] - except psycopg2.Error as e: - logger.error(f"Failed to get PostgreSQL last archived WAL: {e}") - raise PostgreSQLGetLastArchivedWALError() from e - - def get_current_timeline(self) -> str: - """Get the timeline id for the current PostgreSQL unit.""" - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT timeline_id FROM pg_control_checkpoint();") - return cursor.fetchone()[0] - except psycopg2.Error as e: - logger.error(f"Failed to get PostgreSQL current timeline id: {e}") - raise PostgreSQLGetCurrentTimelineError() from e - - def get_postgresql_text_search_configs(self) -> Set[str]: - """Returns the PostgreSQL available text search configs. - - Returns: - Set of PostgreSQL text search configs. - """ - with self._connect_to_database( - database_host=self.current_host - ) as connection, connection.cursor() as cursor: - cursor.execute("SELECT CONCAT('pg_catalog.', cfgname) FROM pg_ts_config;") - text_search_configs = cursor.fetchall() - return {text_search_config[0] for text_search_config in text_search_configs} - - def get_postgresql_timezones(self) -> Set[str]: - """Returns the PostgreSQL available timezones. - - Returns: - Set of PostgreSQL timezones. - """ - with self._connect_to_database( - database_host=self.current_host - ) as connection, connection.cursor() as cursor: - cursor.execute("SELECT name FROM pg_timezone_names;") - timezones = cursor.fetchall() - return {timezone[0] for timezone in timezones} - - def get_postgresql_default_table_access_methods(self) -> Set[str]: - """Returns the PostgreSQL available table access methods. - - Returns: - Set of PostgreSQL table access methods. 
- """ - with self._connect_to_database( - database_host=self.current_host - ) as connection, connection.cursor() as cursor: - cursor.execute("SELECT amname FROM pg_am WHERE amtype = 't';") - access_methods = cursor.fetchall() - return {access_method[0] for access_method in access_methods} - - def get_postgresql_version(self, current_host=True) -> str: - """Returns the PostgreSQL version. - - Returns: - PostgreSQL version number. - """ - host = self.current_host if current_host else None - try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: - cursor.execute("SELECT version();") - # Split to get only the version number. - return cursor.fetchone()[0].split(" ")[1] - except psycopg2.Error as e: - logger.error(f"Failed to get PostgreSQL version: {e}") - raise PostgreSQLGetPostgreSQLVersionError() from e - - def is_tls_enabled(self, check_current_host: bool = False) -> bool: - """Returns whether TLS is enabled. - - Args: - check_current_host: whether to check the current host - instead of the primary host. - - Returns: - whether TLS is enabled. - """ - try: - with self._connect_to_database( - database_host=self.current_host if check_current_host else None - ) as connection, connection.cursor() as cursor: - cursor.execute("SHOW ssl;") - return "on" in cursor.fetchone()[0] - except psycopg2.Error: - # Connection errors happen when PostgreSQL has not started yet. - return False - - def list_access_groups(self, current_host=False) -> Set[str]: - """Returns the list of PostgreSQL database access groups. - - Args: - current_host: whether to check the current host - instead of the primary host. - - Returns: - List of PostgreSQL database access groups. 
- """ - connection = None - host = self.current_host if current_host else None - try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: - cursor.execute( - "SELECT groname FROM pg_catalog.pg_group WHERE groname LIKE '%_access';" - ) - access_groups = cursor.fetchall() - return {group[0] for group in access_groups} - except psycopg2.Error as e: - logger.error(f"Failed to list PostgreSQL database access groups: {e}") - raise PostgreSQLListGroupsError() from e - finally: - if connection is not None: - connection.close() - - def list_accessible_databases_for_user(self, user: str, current_host=False) -> Set[str]: - """Returns the list of accessible databases for a specific user. - - Args: - user: the user to check. - current_host: whether to check the current host - instead of the primary host. - - Returns: - List of accessible database (the ones where - the user has the CONNECT privilege). - """ - connection = None - host = self.current_host if current_host else None - try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: - cursor.execute( - SQL( - "SELECT TRUE FROM pg_catalog.pg_user WHERE usename = {} AND usesuper;" - ).format(Literal(user)) - ) - if cursor.fetchone() is not None: - return {"all"} - cursor.execute( - SQL( - "SELECT datname FROM pg_catalog.pg_database WHERE has_database_privilege({}, datname, 'CONNECT') AND NOT datistemplate;" - ).format(Literal(user)) - ) - databases = cursor.fetchall() - return {database[0] for database in databases} - except psycopg2.Error as e: - logger.error(f"Failed to list accessible databases for user {user}: {e}") - raise PostgreSQLListAccessibleDatabasesForUserError() from e - finally: - if connection is not None: - connection.close() - - def list_users(self, group: Optional[str] = None, current_host=False) -> Set[str]: - """Returns the list of PostgreSQL database users. 
- - Args: - group: optional group to filter the users. - current_host: whether to check the current host - instead of the primary host. - - Returns: - List of PostgreSQL database users. - """ - connection = None - host = self.current_host if current_host else None - try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: - if group: - query = SQL( - "SELECT usename FROM (SELECT UNNEST(grolist) AS user_id FROM pg_catalog.pg_group WHERE groname = {}) AS g JOIN pg_catalog.pg_user AS u ON g.user_id = u.usesysid;" - ).format(Literal(group)) - else: - query = "SELECT usename FROM pg_catalog.pg_user;" - cursor.execute(query) - usernames = cursor.fetchall() - return {username[0] for username in usernames} - except psycopg2.Error as e: - logger.error(f"Failed to list PostgreSQL database users: {e}") - raise PostgreSQLListUsersError() from e - finally: - if connection is not None: - connection.close() - - def list_users_from_relation(self, current_host=False) -> Set[str]: - """Returns the list of PostgreSQL database users that were created by a relation. - - Args: - current_host: whether to check the current host - instead of the primary host. - - Returns: - List of PostgreSQL database users. 
- """ - connection = None - host = self.current_host if current_host else None - try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: - cursor.execute( - "SELECT usename " - "FROM pg_catalog.pg_user " - "WHERE usename LIKE 'relation_id_%' OR usename LIKE 'relation-%' " - "OR usename LIKE 'pgbouncer_auth_relation_%' OR usename LIKE '%_user_%_%' " - "OR usename LIKE 'logical_replication_relation_%';" - ) - usernames = cursor.fetchall() - return {username[0] for username in usernames} - except psycopg2.Error as e: - logger.error(f"Failed to list PostgreSQL database users: {e}") - raise PostgreSQLListUsersError() from e - finally: - if connection is not None: - connection.close() - - def list_valid_privileges_and_roles(self) -> Tuple[Set[str], Set[str]]: - """Returns two sets with valid privileges and roles. - - Returns: - Tuple containing two sets: the first with valid privileges - and the second with valid roles. - """ - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT rolname FROM pg_roles;") - return { - "createdb", - "createrole", - "superuser", - }, {role[0] for role in cursor.fetchall() if role[0]} - - def set_up_database(self, temp_location: Optional[str] = None) -> None: - """Set up postgres database with the right permissions.""" - connection = None - cursor = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") - if cursor.fetchone() is None: - # Allow access to the postgres database only to the system users. 
- cursor.execute("REVOKE ALL PRIVILEGES ON DATABASE postgres FROM PUBLIC;") - cursor.execute("REVOKE CREATE ON SCHEMA public FROM PUBLIC;") - for user in self.system_users: - cursor.execute( - SQL("GRANT ALL PRIVILEGES ON DATABASE postgres TO {};").format( - Identifier(user) - ) - ) - self.create_user( - PERMISSIONS_GROUP_ADMIN, - extra_user_roles=["pg_read_all_data", "pg_write_all_data"], - ) - cursor.execute("GRANT CONNECT ON DATABASE postgres TO admin;") - except psycopg2.Error as e: - logger.error(f"Failed to set up databases: {e}") - raise PostgreSQLDatabasesSetupError() from e - finally: - if cursor is not None: - cursor.close() - if connection is not None: - connection.close() - - def update_user_password( - self, username: str, password: str, database_host: Optional[str] = None - ) -> None: - """Update a user password. - - Args: - username: the user to update the password. - password: the new password for the user. - database_host: the host to connect to. - - Raises: - PostgreSQLUpdateUserPasswordError if the password couldn't be changed. 
- """ - connection = None - try: - with self._connect_to_database( - database_host=database_host - ) as connection, connection.cursor() as cursor: - cursor.execute(SQL("BEGIN;")) - cursor.execute(SQL("SET LOCAL log_statement = 'none';")) - cursor.execute( - SQL("ALTER USER {} WITH ENCRYPTED PASSWORD '" + password + "';").format( - Identifier(username) - ) - ) - cursor.execute(SQL("COMMIT;")) - except psycopg2.Error as e: - logger.error(f"Failed to update user password: {e}") - raise PostgreSQLUpdateUserPasswordError() from e - finally: - if connection is not None: - connection.close() - - def is_restart_pending(self) -> bool: - """Query pg_settings for pending restart.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute("SELECT COUNT(*) FROM pg_settings WHERE pending_restart=True;") - return cursor.fetchone()[0] > 0 - except psycopg2.OperationalError: - logger.warning("Failed to connect to PostgreSQL.") - return False - except psycopg2.Error as e: - logger.error(f"Failed to check if restart is pending: {e}") - return False - finally: - if connection: - connection.close() - - def database_exists(self, db: str) -> bool: - """Check whether specified database exists.""" - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute( - SQL("SELECT datname FROM pg_database WHERE datname={};").format(Literal(db)) - ) - return cursor.fetchone() is not None - except psycopg2.Error as e: - logger.error(f"Failed to check Postgresql database existence: {e}") - raise PostgreSQLDatabaseExistsError() from e - - def table_exists(self, db: str, schema: str, table: str) -> bool: - """Check whether specified table in database exists.""" - try: - with self._connect_to_database( - database=db - ) as connection, connection.cursor() as cursor: - cursor.execute( - SQL( - "SELECT tablename FROM pg_tables WHERE schemaname={} AND tablename={};" - ).format(Literal(schema), 
Literal(table)) - ) - return cursor.fetchone() is not None - except psycopg2.Error as e: - logger.error(f"Failed to check Postgresql table existence: {e}") - raise PostgreSQLTableExistsError() from e - - def is_table_empty(self, db: str, schema: str, table: str) -> bool: - """Check whether table is empty.""" - try: - with self._connect_to_database( - database=db - ) as connection, connection.cursor() as cursor: - cursor.execute(SQL("SELECT COUNT(1) FROM {};").format(Identifier(schema, table))) - return cursor.fetchone()[0] == 0 - except psycopg2.Error as e: - logger.error(f"Failed to check whether table is empty: {e}") - raise PostgreSQLIsTableEmptyError() from e - - def create_publication(self, db: str, name: str, schematables: list[str]) -> None: - """Create PostgreSQL publication.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("CREATE PUBLICATION {} FOR TABLE {};").format( - Identifier(name), - SQL(",").join( - Identifier(schematable.split(".")[0], schematable.split(".")[1]) - for schematable in schematables - ), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to create Postgresql publication: {e}") - raise PostgreSQLCreatePublicationError() from e - finally: - if connection: - connection.close() - - def publication_exists(self, db: str, publication: str) -> bool: - """Check whether specified subscription in database exists.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("SELECT pubname FROM pg_publication WHERE pubname={};").format( - Literal(publication) - ) - ) - return cursor.fetchone() is not None - except psycopg2.Error as e: - logger.error(f"Failed to check Postgresql publication existence: {e}") - raise PostgreSQLPublicationExistsError() from e - finally: - if connection: - connection.close() - - def 
alter_publication(self, db: str, name: str, schematables: list[str]) -> None: - """Alter PostgreSQL publication.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("ALTER PUBLICATION {} SET TABLE {};").format( - Identifier(name), - SQL(",").join( - Identifier(schematable.split(".")[0], schematable.split(".")[1]) - for schematable in schematables - ), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to alter Postgresql publication: {e}") - raise PostgreSQLAlterPublicationError() from e - finally: - if connection: - connection.close() - - def drop_publication(self, db: str, publication: str) -> None: - """Drop PostgreSQL publication.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("DROP PUBLICATION IF EXISTS {};").format( - Identifier(publication), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to drop Postgresql publication: {e}") - raise PostgreSQLDropPublicationError() from e - finally: - if connection: - connection.close() - - def create_subscription( - self, - subscription: str, - host: str, - db: str, - user: str, - password: str, - publication: str, - replication_slot: str, - ) -> None: - """Create PostgreSQL subscription.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL( - "CREATE SUBSCRIPTION {} CONNECTION {} PUBLICATION {} WITH (copy_data=true,create_slot=false,enabled=true,slot_name={});" - ).format( - Identifier(subscription), - Literal(f"host={host} dbname={db} user={user} password={password}"), - Identifier(publication), - Identifier(replication_slot), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to create Postgresql subscription: {e}") - raise PostgreSQLCreateSubscriptionError() from e - 
finally: - if connection: - connection.close() - - def subscription_exists(self, db: str, subscription: str) -> bool: - """Check whether specified subscription in database exists.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("SELECT subname FROM pg_subscription WHERE subname={};").format( - Literal(subscription) - ) - ) - return cursor.fetchone() is not None - except psycopg2.Error as e: - logger.error(f"Failed to check Postgresql subscription existence: {e}") - raise PostgreSQLSubscriptionExistsError() from e - finally: - if connection: - connection.close() - - def update_subscription(self, db: str, subscription: str, host: str, user: str, password: str): - """Update PostgreSQL subscription connection details.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("ALTER SUBSCRIPTION {} CONNECTION {}").format( - Identifier(subscription), - Literal(f"host={host} dbname={db} user={user} password={password}"), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to update Postgresql subscription: {e}") - raise PostgreSQLUpdateSubscriptionError() from e - finally: - if connection: - connection.close() - - def refresh_subscription(self, db: str, subscription: str): - """Refresh PostgreSQL subscription to pull publication changes.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection.cursor() as cursor: - cursor.execute( - SQL("ALTER SUBSCRIPTION {} REFRESH PUBLICATION").format( - Identifier(subscription) - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to refresh Postgresql subscription: {e}") - raise PostgreSQLRefreshSubscriptionError() from e - finally: - if connection: - connection.close() - - def drop_subscription(self, db: str, subscription: str) -> None: - """Drop PostgreSQL 
subscription.""" - connection = None - try: - connection = self._connect_to_database(database=db) - with connection, connection.cursor() as cursor: - cursor.execute( - SQL("ALTER SUBSCRIPTION {} DISABLE;").format( - Identifier(subscription), - ) - ) - cursor.execute( - SQL("ALTER SUBSCRIPTION {} SET (slot_name=NONE);").format( - Identifier(subscription), - ) - ) - cursor.execute( - SQL("DROP SUBSCRIPTION {};").format( - Identifier(subscription), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to drop Postgresql subscription: {e}") - raise PostgreSQLDropSubscriptionError() from e - finally: - if connection: - connection.close() - - @staticmethod - def build_postgresql_group_map(group_map: Optional[str]) -> List[Tuple]: - """Build the PostgreSQL authorization group-map. - - Args: - group_map: serialized group-map with the following format: - =, - =, - ... - - Returns: - List of LDAP group to PostgreSQL group tuples. - """ - if group_map is None: - return [] - - group_mappings = group_map.split(",") - group_mappings = (mapping.strip() for mapping in group_mappings) - group_map_list = [] - - for mapping in group_mappings: - mapping_parts = mapping.split("=") - if len(mapping_parts) != 2: - raise ValueError("The group-map must contain value pairs split by commas") - - ldap_group = mapping_parts[0] - psql_group = mapping_parts[1] - - if psql_group in [*ACCESS_GROUPS, PERMISSIONS_GROUP_ADMIN]: - logger.warning(f"Tried to assign LDAP users to forbidden group: {psql_group}") - continue - - group_map_list.append((ldap_group, psql_group)) - - return group_map_list - - @staticmethod - def build_postgresql_parameters( - config_options: dict, available_memory: int, limit_memory: Optional[int] = None - ) -> Optional[dict]: - """Builds the PostgreSQL parameters. - - Args: - config_options: charm config options containing profile and PostgreSQL parameters. - available_memory: available memory to use in calculation in bytes. 
- limit_memory: (optional) limit memory to use in calculation in bytes. - - Returns: - Dictionary with the PostgreSQL parameters. - """ - if limit_memory: - available_memory = min(available_memory, limit_memory) - profile = config_options["profile"] - logger.debug(f"Building PostgreSQL parameters for {profile=} and {available_memory=}") - parameters = {} - for config, value in config_options.items(): - # Filter config option not related to PostgreSQL parameters. - if not config.startswith(( - "connection", - "cpu", - "durability", - "instance", - "logging", - "memory", - "optimizer", - "request", - "response", - "session", - "storage", - "vacuum", - )): - continue - parameter = "_".join(config.split("_")[1:]) - if parameter in ["date_style", "time_zone"]: - parameter = "".join(x.capitalize() for x in parameter.split("_")) - parameters[parameter] = value - shared_buffers_max_value_in_mb = int(available_memory * 0.4 / 10**6) - shared_buffers_max_value = int(shared_buffers_max_value_in_mb * 10**3 / 8) - if parameters.get("shared_buffers", 0) > shared_buffers_max_value: - raise Exception( - f"Shared buffers config option should be at most 40% of the available memory, which is {shared_buffers_max_value_in_mb}MB" - ) - if profile == "production": - if "shared_buffers" in parameters: - # Convert to bytes to use in the calculation. - shared_buffers = parameters["shared_buffers"] * 8 * 10**3 - else: - # Use 25% of the available memory for shared_buffers. - # and the remaining as cache memory. - shared_buffers = int(available_memory * 0.25) - parameters["shared_buffers"] = f"{int(shared_buffers * 128 / 10**6)}" - effective_cache_size = int(available_memory - shared_buffers) - parameters.update({ - "effective_cache_size": f"{int(effective_cache_size / 10**6) * 128}" - }) - return parameters - - def validate_date_style(self, date_style: str) -> bool: - """Validate a date style against PostgreSQL. - - Returns: - Whether the date style is valid. 
- """ - try: - with self._connect_to_database( - database_host=self.current_host - ) as connection, connection.cursor() as cursor: - cursor.execute( - SQL( - "SET DateStyle to {};", - ).format(Identifier(date_style)) - ) - return True - except psycopg2.Error: - return False - - def validate_group_map(self, group_map: Optional[str]) -> bool: - """Validate the PostgreSQL authorization group-map. - - Args: - group_map: serialized group-map with the following format: - =, - =, - ... - - Returns: - Whether the group-map is valid. - """ - if group_map is None: - return True - - try: - group_map = self.build_postgresql_group_map(group_map) - except ValueError: - return False - - for _, psql_group in group_map: - with self._connect_to_database() as connection, connection.cursor() as cursor: - query = SQL("SELECT TRUE FROM pg_roles WHERE rolname={};") - query = query.format(Literal(psql_group)) - cursor.execute(query) - - if cursor.fetchone() is None: - return False - - return True - - def is_user_in_hba(self, username: str) -> bool: - """Check if user was added in pg_hba.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute( - SQL( - "SELECT COUNT(*) FROM pg_hba_file_rules WHERE {} = ANY(user_name);" - ).format(Literal(username)) - ) - return cursor.fetchone()[0] > 0 - except psycopg2.Error as e: - logger.debug(f"Failed to check pg_hba: {e}") - return False - finally: - if connection: - connection.close() diff --git a/poetry.lock b/poetry.lock index 329daf9a8e..c07fb7dab6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. [[package]] name = "allure-pytest" @@ -511,7 +511,7 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, @@ -906,7 +906,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -1062,6 +1062,21 @@ files = [ [package.dependencies] referencing = ">=0.31.0" +[[package]] +name = "jubilant" +version = "1.3.0" +description = "Juju CLI wrapper for charm integration testing" +optional = false +python-versions = ">=3.8" +groups = ["integration"] +files = [ + {file = "jubilant-1.3.0-py3-none-any.whl", hash = "sha256:a5ea4a3bf487ab0286eaad0de9df145761657c08beb834931340b9ebb1f41292"}, + {file = "jubilant-1.3.0.tar.gz", hash = "sha256:ff43d6eb67a986958db6317d7ff3df1c8c160d0c56736628919ac1f7319d444e"}, +] + +[package.dependencies] +PyYAML = "==6.*" + [[package]] name = "juju" version = "3.6.1.3" @@ -1415,7 +1430,7 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1493,7 +1508,7 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" 
optional = false python-versions = ">=3.9" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -1515,6 +1530,26 @@ files = [ {file = "poetry_core-2.1.3.tar.gz", hash = "sha256:0522a015477ed622c89aad56a477a57813cace0c8e7ff2a2906b7ef4a2e296a4"}, ] +[[package]] +name = "postgresql-charms-single-kernel" +version = "0.0.0" +description = "Shared and reusable code for PostgreSQL-related charms" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "c1ef734fd749b360e14fdb5a24c55315cb0a9000.zip", hash = "sha256:7db514446a2ae18294b1215f3ea3e3f81211970a904e45f17b60bc4a0d4bdc05"}, +] + +[package.dependencies] +ops = ">=2.22.0" +psycopg2-binary = ">=2.9.10" +pytest = ">=8.3.5" + +[package.source] +type = "url" +url = "https://github.com/marceloneppel/postgresql-single-kernel-library/archive/c1ef734fd749b360e14fdb5a24c55315cb0a9000.zip" + [[package]] name = "prompt-toolkit" version = "3.0.51" @@ -1565,7 +1600,6 @@ files = [ {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, - {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = 
"sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, @@ -1577,7 +1611,7 @@ version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = false python-versions = ">=3.8" -groups = ["integration"] +groups = ["main", "integration"] files = [ {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"}, {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"}, @@ -1626,7 +1660,6 @@ files = [ {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"}, {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"}, {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"}, - {file = "psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142"}, {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"}, {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"}, {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"}, @@ -1790,7 +1823,7 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -1863,7 +1896,7 @@ version = "8.4.1" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.9" -groups = ["integration", "unit"] +groups = ["main", "integration", "unit"] files = [ {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, @@ -2635,4 +2668,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "1a753ccbe19c93e3bc64dc634032c29db0bc6f8f59de136d7ff58f69dc986b0d" +content-hash = "0a82a680e0354f4a27284a7c59a94ae890dab107b4cb08e579006382d4a8cb3b" diff --git a/pyproject.toml b/pyproject.toml index fede27ae23..f05d783570 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,6 +17,7 @@ jinja2 = "^3.1.6" lightkube = "^0.17.2" lightkube-models = "^1.28.1.4" psycopg2 = "^2.9.10" +postgresql-charms-single-kernel = {url = "https://github.com/marceloneppel/postgresql-single-kernel-library/archive/c1ef734fd749b360e14fdb5a24c55315cb0a9000.zip"} [tool.poetry.group.charm-libs.dependencies] # data_platform_libs/v0/data_interfaces.py @@ -69,6 +70,7 @@ psycopg2-binary = "^2.9.10" boto3 = "*" tenacity = "^9.1.2" allure-pytest = "^2.15.0" +jubilant = "^1.3.0" [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/src/charm.py b/src/charm.py index 93d6cbd34d..e323c8ed43 100755 --- a/src/charm.py +++ b/src/charm.py @@ -18,6 +18,21 @@ from typing import Literal, get_args from urllib.parse import urlparse +from 
single_kernel_postgresql.utils.postgresql import ( + ACCESS_GROUP_IDENTITY, + ACCESS_GROUPS, + REQUIRED_PLUGINS, + PostgreSQL, + PostgreSQLCreatePredefinedRolesError, + PostgreSQLCreateUserError, + PostgreSQLEnableDisableExtensionError, + PostgreSQLGetCurrentTimelineError, + PostgreSQLGrantDatabasePrivilegesToUserError, + PostgreSQLListGroupsError, + PostgreSQLListUsersError, + PostgreSQLUpdateUserPasswordError, +) + from authorisation_rules_observer import ( AuthorisationRulesChangeCharmEvents, AuthorisationRulesObserver, @@ -42,16 +57,6 @@ from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider from charms.loki_k8s.v1.loki_push_api import LogProxyConsumer -from charms.postgresql_k8s.v0.postgresql import ( - ACCESS_GROUP_IDENTITY, - ACCESS_GROUPS, - REQUIRED_PLUGINS, - PostgreSQL, - PostgreSQLEnableDisableExtensionError, - PostgreSQLGetCurrentTimelineError, - PostgreSQLListGroupsError, - PostgreSQLUpdateUserPasswordError, -) from charms.postgresql_k8s.v0.postgresql_tls import PostgreSQLTLS from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from charms.rolling_ops.v0.rollingops import RollingOpsManager, RunWithLock @@ -1164,6 +1169,33 @@ def _initialize_cluster(self, event: WorkloadEvent) -> bool: event.defer() return False + try: + self._setup_users() + except PostgreSQLCreatePredefinedRolesError as e: + logger.exception(e) + self.unit.status = BlockedStatus("Failed to create pre-defined roles") + return False + except PostgreSQLGrantDatabasePrivilegesToUserError as e: + logger.exception(e) + self.unit.status = BlockedStatus("Failed to grant database privileges to user") + return False + except PostgreSQLCreateUserError as e: + logger.exception(e) + self.set_unit_status(BlockedStatus("Failed to create postgres user")) + return False + except PostgreSQLListUsersError: + logger.warning("Deferring on_start: Unable to list users") + event.defer() + 
return False + + # Mark the cluster as initialised. + self._peers.data[self.app]["cluster_initialised"] = "True" + + return True + + def _setup_users(self) -> None: + self.postgresql.create_predefined_instance_roles() + pg_users = self.postgresql.list_users() # Create the backup user. if BACKUP_USER not in pg_users: @@ -1183,11 +1215,6 @@ def _initialize_cluster(self, event: WorkloadEvent) -> bool: self.postgresql.create_access_groups() self.postgresql.grant_internal_access_group_memberships() - # Mark the cluster as initialised. - self._peers.data[self.app]["cluster_initialised"] = "True" - - return True - @property def is_blocked(self) -> bool: """Returns whether the unit is in a blocked state.""" @@ -1609,6 +1636,12 @@ def _was_restore_successful(self, container: Container, service: ServiceInfo) -> logger.debug("Restore check early exit: Patroni has not started yet") return False + try: + self._setup_users() + except Exception as e: + logger.exception(e) + return False + restoring_backup = self.app_peer_data.get("restoring-backup") restore_timeline = self.app_peer_data.get("restore-timeline") restore_to_time = self.app_peer_data.get("restore-to-time") diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 0c94c798d3..1da6203b31 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -9,7 +9,10 @@ DatabaseProvides, DatabaseRequestedEvent, ) -from charms.postgresql_k8s.v0.postgresql import ( +from ops.charm import CharmBase, RelationBrokenEvent, RelationDepartedEvent +from ops.framework import Object +from ops.model import ActiveStatus, BlockedStatus, Relation +from single_kernel_postgresql.utils.postgresql import ( ACCESS_GROUP_RELATION, ACCESS_GROUPS, INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE, @@ -18,9 +21,6 @@ PostgreSQLDeleteUserError, PostgreSQLGetPostgreSQLVersionError, ) -from ops.charm import CharmBase, RelationBrokenEvent, RelationDepartedEvent -from ops.framework import Object 
-from ops.model import ActiveStatus, BlockedStatus, Relation from constants import DATABASE_PORT from utils import new_password @@ -107,12 +107,13 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: try: # Creates the user and the database for this specific relation. user = f"relation_id_{event.relation.id}" - password = new_password() - self.charm.postgresql.create_user(user, password, extra_user_roles=extra_user_roles) plugins = self.charm.get_plugins() - self.charm.postgresql.create_database( - database, user, plugins=plugins, client_relations=self.charm.client_relations + self.charm.postgresql.create_database(database, plugins=plugins) + + password = new_password() + self.charm.postgresql.create_user( + user, password, extra_user_roles=extra_user_roles, database=database ) # Share the credentials with the application. diff --git a/src/upgrade.py b/src/upgrade.py index 66dd6f0ef4..52d6255197 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -13,13 +13,13 @@ DependencyModel, KubernetesClientError, ) -from charms.postgresql_k8s.v0.postgresql import ACCESS_GROUPS from lightkube.core.client import Client from lightkube.core.exceptions import ApiError from lightkube.resources.apps_v1 import StatefulSet from ops.charm import UpgradeCharmEvent, WorkloadEvent from ops.model import BlockedStatus, MaintenanceStatus, RelationDataContent from pydantic import BaseModel +from single_kernel_postgresql.utils.postgresql import ACCESS_GROUPS from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed from constants import APP_SCOPE, MONITORING_PASSWORD_KEY, MONITORING_USER, PATRONI_PASSWORD_KEY diff --git a/templates/patroni.yml.j2 b/templates/patroni.yml.j2 index 1aa47b2359..8ac826577d 100644 --- a/templates/patroni.yml.j2 +++ b/templates/patroni.yml.j2 @@ -46,7 +46,11 @@ bootstrap: log_truncate_on_rotation: 'on' logging_collector: 'on' wal_level: logical - shared_preload_libraries: 'timescaledb,pgaudit' + shared_preload_libraries: 
'timescaledb,pgaudit,set_user' + session_preload_libraries: 'login_hook' + set_user.block_log_statement: 'on' + set_user.exit_on_error: 'on' + set_user.superuser_allowlist: '+charmed_dba' {%- if slots %} slots: {%- for slot, database in slots.items() %} @@ -124,7 +128,11 @@ postgresql: bin_dir: /usr/lib/postgresql/{{ version }}/bin listen: 0.0.0.0:5432 parameters: - shared_preload_libraries: 'timescaledb,pgaudit' + shared_preload_libraries: 'timescaledb,pgaudit,set_user' + session_preload_libraries: 'login_hook' + set_user.block_log_statement: 'on' + set_user.exit_on_error: 'on' + set_user.superuser_allowlist: '+charmed_dba' {%- if enable_pgbackrest_archiving %} archive_command: 'pgbackrest --stanza={{ stanza }} archive-push %p' {% else %} @@ -146,6 +154,13 @@ postgresql: pg_hba: - local all backup peer map=operator - local all monitoring password + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_stats 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_read 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_dml 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_backup 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_dba 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_admin 0.0.0.0/0 scram-sha-256 + - {{ 'hostssl' if enable_tls else 'host' }} all +charmed_databases_owner 0.0.0.0/0 scram-sha-256 {%- if not connectivity %} - {{ 'hostssl' if enable_tls else 'host' }} all all {{ endpoint }}.{{ namespace }}.svc.cluster.local md5 - {{ 'hostssl' if enable_tls else 'host' }} all all 0.0.0.0/0 reject diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 57c3e4b87f..93a38cc683 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -5,11 +5,13 @@ import uuid import boto3 +import jubilant import pytest from pytest_operator.plugin import OpsTest from . 
import architecture from .helpers import construct_endpoint +from .jubilant_helpers import RoleAttributeValue AWS = "AWS" GCP = "GCP" @@ -94,3 +96,173 @@ async def gcp_cloud_configs(ops_test: OpsTest) -> None: yield config, credentials cleanup_cloud(config, credentials) + + +@pytest.fixture(scope="module") +def juju(request: pytest.FixtureRequest): + """Pytest fixture that wraps :meth:`jubilant.with_model`. + + This adds command line parameter ``--keep-models`` (see help for details). + """ + controller = request.config.getoption("--controller") + model = request.config.getoption("--model") + controller_and_model = None + if controller and model: + controller_and_model = f"{controller}:{model}" + elif controller: + controller_and_model = controller + elif model: + controller_and_model = model + keep_models = bool(request.config.getoption("--keep-models")) + + if controller_and_model: + juju = jubilant.Juju(model=controller_and_model) # type: ignore + yield juju + log = juju.debug_log(limit=1000) + else: + with jubilant.temp_model(keep=keep_models) as juju: + yield juju + log = juju.debug_log(limit=1000) + + if request.session.testsfailed: + print(log, end="") + + +@pytest.fixture(scope="module") +def predefined_roles() -> dict: + """Return a list of predefined roles with their expected permissions.""" + return { + "": { + "auto-escalate-to-database-owner": RoleAttributeValue.REQUESTED_DATABASE, + "permissions": { + "connect": RoleAttributeValue.REQUESTED_DATABASE, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.REQUESTED_DATABASE, + "read-data": RoleAttributeValue.NO, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.NO, + }, + }, + "charmed_stats": { + "auto-escalate-to-database-owner": 
RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": RoleAttributeValue.NO, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.NO, + }, + }, + "charmed_read": { + "auto-escalate-to-database-owner": RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": RoleAttributeValue.ALL_DATABASES, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.NO, + }, + }, + "charmed_dml": { + "auto-escalate-to-database-owner": RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": RoleAttributeValue.ALL_DATABASES, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.ALL_DATABASES, + }, + }, + "charmed_backup": { + "auto-escalate-to-database-owner": RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": 
RoleAttributeValue.NO, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.YES, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.NO, + }, + }, + "charmed_dba": { + "auto-escalate-to-database-owner": RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": RoleAttributeValue.ALL_DATABASES, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.YES, + "write-data": RoleAttributeValue.ALL_DATABASES, + }, + }, + "charmed_admin": { + "auto-escalate-to-database-owner": RoleAttributeValue.ALL_DATABASES, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.NO, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.ALL_DATABASES, + "read-data": RoleAttributeValue.ALL_DATABASES, + "read-stats": RoleAttributeValue.ALL_DATABASES, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.NO, + "set-user": RoleAttributeValue.NO, + "write-data": RoleAttributeValue.ALL_DATABASES, + }, + }, + "CREATEDB": { + "auto-escalate-to-database-owner": RoleAttributeValue.NO, + "permissions": { + "connect": RoleAttributeValue.ALL_DATABASES, + "create-databases": RoleAttributeValue.YES, + "create-objects": RoleAttributeValue.NO, + "escalate-to-database-owner": RoleAttributeValue.NO, + "read-data": RoleAttributeValue.NO, + "read-stats": RoleAttributeValue.NO, + "run-backup-commands": RoleAttributeValue.NO, + "set-up-predefined-catalog-roles": RoleAttributeValue.YES, + "set-user": RoleAttributeValue.NO, + 
"write-data": RoleAttributeValue.NO, + }, + }, + } + + +@pytest.fixture(scope="module") +def predefined_roles_combinations() -> list: + """Return a list of valid combinations of predefined roles.""" + return [ + ("",), + ("charmed_stats",), + ("charmed_read",), + ("charmed_dml",), + ("charmed_admin",), + ("charmed_admin", "CREATEDB"), + ] diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index c33db6ace3..8f3fa810eb 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -40,6 +40,7 @@ METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) DATABASE_APP_NAME = METADATA["name"] APPLICATION_NAME = "postgresql-test-app" +DATA_INTEGRATOR_APP_NAME = "data-integrator" STORAGE_PATH = METADATA["storage"]["data"]["location"] @@ -120,6 +121,23 @@ async def build_and_deploy( ) +def check_connected_user( + cursor, session_user: str, current_user: str, primary: bool = True +) -> None: + cursor.execute("SELECT session_user,current_user;") + result = cursor.fetchone() + if result is not None: + instance = "primary" if primary else "replica" + assert result[0] == session_user, ( + f"The session user should be the {session_user} user in the {instance} (it's currently {result[0]})" + ) + assert result[1] == current_user, ( + f"The current user should be the {current_user} user in the {instance} (it's currently {result[1]})" + ) + else: + assert False, "No result returned from the query" + + async def check_database_users_existence( ops_test: OpsTest, users_that_should_exist: list[str], diff --git a/tests/integration/jubilant_helpers.py b/tests/integration/jubilant_helpers.py new file mode 100644 index 0000000000..0cf140ddfc --- /dev/null +++ b/tests/integration/jubilant_helpers.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. 
+import json
+import subprocess
+from enum import Enum
+
+import jubilant
+
+from constants import PEER
+
+from .helpers import DATABASE_APP_NAME, SecretNotFoundError
+
+
+class RoleAttributeValue(Enum):
+    NO = 0
+    YES = 1
+    REQUESTED_DATABASE = 2
+    ALL_DATABASES = 3
+
+
+def get_credentials(
+    juju: jubilant.Juju,
+    unit_name: str,
+) -> dict:
+    """Get the data integrator credentials.
+
+    Args:
+        juju: the jubilant.Juju instance.
+        unit_name: the name of the unit.
+
+    Returns:
+        the data integrator credentials.
+    """
+    action = juju.run(unit_name, "get-credentials")
+    return action.results
+
+
+def get_password(
+    username: str = "operator",
+    database_app_name: str = DATABASE_APP_NAME,
+) -> str:
+    """Retrieve a user password from the secret.
+
+    Args:
+        username: the user to get the password.
+        database_app_name: the app for getting the secret
+
+    Returns:
+        the user password.
+    """
+    secret = get_secret_by_label(label=f"{PEER}.{database_app_name}.app")
+    password = secret.get(f"{username}-password")
+    print(f"Retrieved password for {username} (value redacted from test logs)")
+
+    return password
+
+
+def get_primary(juju: jubilant.Juju, unit_name: str) -> str:
+    """Get the primary unit.
+
+    Args:
+        juju: the jubilant.Juju instance.
+        unit_name: the name of the unit.
+
+    Returns:
+        the current primary unit.
+    """
+    action = juju.run(unit_name, "get-primary")
+    if "primary" not in action.results or action.results["primary"] not in juju.status().get_units(
+        unit_name.split("/")[0]
+    ):
+        assert False, "Primary unit not found"
+    return action.results["primary"]
+
+
+def get_secret_by_label(label: str) -> dict[str, str]:
+    # Subprocess calls are used because some Juju commands are still missing in jubilant:
+    # https://github.com/canonical/jubilant/issues/117.
+ secrets_raw = subprocess.run(["juju", "list-secrets"], capture_output=True).stdout.decode( + "utf-8" + ) + secret_ids = [ + secret_line.split()[0] for secret_line in secrets_raw.split("\n")[1:] if secret_line + ] + + for secret_id in secret_ids: + secret_data_raw = subprocess.run( + ["juju", "show-secret", "--format", "json", "--reveal", secret_id], capture_output=True + ).stdout + secret_data = json.loads(secret_data_raw) + + if label == secret_data[secret_id].get("label"): + return secret_data[secret_id]["content"]["Data"] + + raise SecretNotFoundError(f"Secret with label {label} not found") + + +def get_unit_address(juju: jubilant.Juju, unit_name: str) -> str: + """Get the unit IP address. + + Args: + juju: the jubilant.Juju instance. + unit_name: The name of the unit + + Returns: + IP address of the unit + """ + return juju.status().get_units(unit_name.split("/")[0]).get(unit_name).address + + +def relations(juju: jubilant.Juju, provider_app: str, requirer_app: str) -> list: + return [ + relation + for relation in juju.status().apps.get(provider_app).relations.values() + if any( + True for relation_instance in relation if relation_instance.related_app == requirer_app + ) + ] + + +def roles_attributes(predefined_roles: dict, combination: str) -> dict: + auto_escalate_to_database_owner = RoleAttributeValue.NO + connect = RoleAttributeValue.NO + create_databases = RoleAttributeValue.NO + create_objects = RoleAttributeValue.NO + escalate_to_database_owner = RoleAttributeValue.NO + read_data = RoleAttributeValue.NO + read_stats = RoleAttributeValue.NO + run_backup_commands = RoleAttributeValue.NO + set_up_predefined_catalog_roles = RoleAttributeValue.NO + set_user = RoleAttributeValue.NO + write_data = RoleAttributeValue.NO + for role in combination.split(","): + # Whether the relation user is auto-escalated to the database owner user at login + # in the requested database (True value) or in all databases ("*" value). 
+        will_auto_escalate_to_database_owner = predefined_roles[role][
+            "auto-escalate-to-database-owner"
+        ]
+        if (
+            auto_escalate_to_database_owner == RoleAttributeValue.NO
+            or will_auto_escalate_to_database_owner == RoleAttributeValue.ALL_DATABASES
+        ):
+            auto_escalate_to_database_owner = will_auto_escalate_to_database_owner
+
+        role_permissions = predefined_roles[role]["permissions"]
+
+        # Permission to connect to the requested database (REQUESTED_DATABASE value) or to
+        # all databases (ALL_DATABASES value).
+        role_can_connect = role_permissions["connect"]
+        if connect == RoleAttributeValue.NO or role_can_connect == RoleAttributeValue.ALL_DATABASES:
+            connect = role_can_connect
+
+        # Permission to create databases (RoleAttributeValue.YES or RoleAttributeValue.NO).
+        create_databases = (
+            role_permissions["create-databases"]
+            if create_databases == RoleAttributeValue.NO
+            else create_databases
+        )
+
+        # Permission to create objects in the requested database (REQUESTED_DATABASE value)
+        # or in all databases (ALL_DATABASES value).
+        role_can_create_objects = role_permissions["create-objects"]
+        if create_objects == RoleAttributeValue.NO or role_can_create_objects == RoleAttributeValue.ALL_DATABASES:
+            create_objects = role_can_create_objects
+
+        # Permission to escalate to the database owner user in the requested database (REQUESTED_DATABASE value)
+        # or in all databases (ALL_DATABASES value).
+        role_can_escalate_to_database_owner = role_permissions["escalate-to-database-owner"]
+        if (
+            escalate_to_database_owner == RoleAttributeValue.NO
+            or role_can_escalate_to_database_owner == RoleAttributeValue.ALL_DATABASES
+        ):
+            escalate_to_database_owner = role_can_escalate_to_database_owner
+
+        # Permission to read data in the requested database (REQUESTED_DATABASE value) or in
+        # all databases (ALL_DATABASES value).
+        role_can_read_data = role_permissions["read-data"]
+        if read_data == RoleAttributeValue.NO or role_can_read_data == RoleAttributeValue.ALL_DATABASES:
+            read_data = role_can_read_data
+
+        read_stats = (
+            role_permissions["read-stats"]
+            if role_permissions["read-stats"] != RoleAttributeValue.NO
+            else read_stats
+        )
+
+        run_backup_commands = (
+            role_permissions["run-backup-commands"]
+            if role_permissions["run-backup-commands"] != RoleAttributeValue.NO
+            else run_backup_commands
+        )
+
+        # Permission to set up predefined catalog roles (RoleAttributeValue.ALL_DATABASES or RoleAttributeValue.NO
+        # for not being able to do it).
+        role_can_set_up_predefined_catalog_roles = role_permissions[
+            "set-up-predefined-catalog-roles"
+        ]
+        if (
+            set_up_predefined_catalog_roles == RoleAttributeValue.NO
+            or role_can_set_up_predefined_catalog_roles == RoleAttributeValue.ALL_DATABASES
+        ):
+            set_up_predefined_catalog_roles = role_can_set_up_predefined_catalog_roles
+
+        # Permission to call the set_user function (RoleAttributeValue.YES or RoleAttributeValue.NO).
+        set_user = role_permissions["set-user"] if set_user == RoleAttributeValue.NO else set_user
+
+        # Permission to write data in the requested database (REQUESTED_DATABASE value) or in
+        # all databases (ALL_DATABASES value).
+        role_can_write_data = role_permissions["write-data"]
+        if write_data == RoleAttributeValue.NO or role_can_write_data == RoleAttributeValue.ALL_DATABASES:
+            write_data = role_can_write_data
+    return {
+        "auto-escalate-to-database-owner": auto_escalate_to_database_owner,
+        "permissions": {
+            "connect": connect,
+            "create-databases": create_databases,
+            "create-objects": create_objects,
+            "escalate-to-database-owner": escalate_to_database_owner,
+            "read-data": read_data,
+            "read-stats": read_stats,
+            "run-backup-commands": run_backup_commands,
+            "set-up-predefined-catalog-roles": set_up_predefined_catalog_roles,
+            "set-user": set_user,
+            "write-data": write_data,
+        },
+    }
diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py
index efc7bf0b6a..1b08e24925 100644
--- a/tests/integration/new_relations/test_new_relations_1.py
+++ b/tests/integration/new_relations/test_new_relations_1.py
@@ -14,7 +14,12 @@
 
 from constants import DATABASE_DEFAULT_NAME
 
-from ..helpers import CHARM_BASE, check_database_users_existence, scale_application
+from ..helpers import (
+    CHARM_BASE,
+    DATA_INTEGRATOR_APP_NAME,
+    check_database_users_existence,
+    scale_application,
+)
 from .helpers import (
     build_connection_string,
     get_application_relation_data,
@@ -25,7 +30,6 @@
 APPLICATION_APP_NAME = "postgresql-test-app"
 DATABASE_APP_NAME = "database"
 ANOTHER_DATABASE_APP_NAME = "another-database"
-DATA_INTEGRATOR_APP_NAME = "data-integrator"
 DISCOURSE_APP_NAME = "discourse-k8s"
 REDIS_APP_NAME = "redis-k8s"
 APP_NAMES = [APPLICATION_APP_NAME, DATABASE_APP_NAME, ANOTHER_DATABASE_APP_NAME]
diff --git a/tests/integration/test_predefined_roles.py b/tests/integration/test_predefined_roles.py
new file mode 100644
index 0000000000..d38db28bf0
--- /dev/null
+++ b/tests/integration/test_predefined_roles.py
@@ -0,0 +1,791 @@
+#!/usr/bin/env python3
+# Copyright 2025 Canonical Ltd.
+# See LICENSE file for licensing details.
+import logging +from time import sleep + +import jubilant +import psycopg2 +import pytest as pytest +from psycopg2.sql import SQL, Identifier + +from .helpers import ( + DATA_INTEGRATOR_APP_NAME, + DATABASE_APP_NAME, + METADATA, + check_connected_user, + db_connect, +) +from .jubilant_helpers import ( + RoleAttributeValue, + get_credentials, + get_password, + get_primary, + get_unit_address, + relations, + roles_attributes, +) + +logger = logging.getLogger(__name__) + +OTHER_DATABASE_NAME = "other-database" +REQUESTED_DATABASE_NAME = "requested-database" +RELATION_ENDPOINT = "postgresql" +ROLE_BACKUP = "charmed_backup" +ROLE_DBA = "charmed_dba" +ROLE_DATABASES_OWNER = "charmed_databases_owner" +NO_CATALOG_LEVEL_ROLES_DATABASES = [OTHER_DATABASE_NAME, "postgres", "template1"] +TIMEOUT = 15 * 60 + + +@pytest.mark.abort_on_fail +def test_deploy(juju: jubilant.Juju, charm, predefined_roles_combinations) -> None: + """Deploy and relate the charms.""" + # Deploy the database charm if not already deployed. + if DATABASE_APP_NAME not in juju.status().apps: + logger.info("Deploying database charm") + resources = { + "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"] + } + juju.deploy( + charm, + config={"profile": "testing"}, + num_units=1, + resources=resources, + trust=True, + ) + + combinations = [*predefined_roles_combinations, (ROLE_BACKUP,), (ROLE_DBA,)] + for combination in combinations: + # Define an application name suffix and a database name based on the combination + # of predefined roles. + suffix = ( + f"-{'-'.join(combination)}".replace("_", "-").lower() + if "-".join(combination) != "" + else "" + ) + database_name = f"{REQUESTED_DATABASE_NAME}{suffix}" + + # Deploy the data integrator charm for each combination of predefined roles. 
+ data_integrator_app_name = f"{DATA_INTEGRATOR_APP_NAME}{suffix}" + extra_user_roles = ( + "" if combination[0] in [ROLE_BACKUP, ROLE_DBA] else ",".join(combination) + ) + if data_integrator_app_name not in juju.status().apps: + logger.info( + f"Deploying data integrator charm {'with extra user roles: ' + extra_user_roles.replace(',', ', ') if extra_user_roles else 'without extra user roles'}" + ) + juju.deploy( + DATA_INTEGRATOR_APP_NAME, + app=data_integrator_app_name, + config={"database-name": database_name, "extra-user-roles": extra_user_roles}, + ) + + # Relate the data integrator charm to the database charm. + existing_relations = relations(juju, DATABASE_APP_NAME, data_integrator_app_name) + if not existing_relations: + logger.info("Adding relation between charms") + juju.integrate(data_integrator_app_name, DATABASE_APP_NAME) + + juju.wait(lambda status: jubilant.all_active(status), timeout=TIMEOUT) + + +def test_operations(juju: jubilant.Juju, predefined_roles) -> None: # noqa: C901 + """Check that the data integrator user can perform the expected operations in each database.""" + primary = get_primary(juju, f"{DATABASE_APP_NAME}/0") + host = get_unit_address(juju, primary) + operator_password = get_password() + connection = None + cursor = None + try: + connection = db_connect(host, operator_password) + connection.autocommit = True + cursor = connection.cursor() + cursor.execute(f'DROP DATABASE IF EXISTS "{OTHER_DATABASE_NAME}";') + cursor.execute(f'CREATE DATABASE "{OTHER_DATABASE_NAME}";') + cursor.execute("SELECT datname FROM pg_database WHERE datname != 'template0';") + databases = [] + for database in sorted(database[0] for database in cursor.fetchall()): + if database.startswith(f"{OTHER_DATABASE_NAME}-"): + logger.info(f"Dropping database {database} created by the test") + cursor.execute(SQL("DROP DATABASE {};").format(Identifier(database))) + else: + databases.append(database) + sub_connection = None + try: + sub_connection = db_connect(host, 
operator_password, database=database) + sub_connection.autocommit = True + with sub_connection.cursor() as sub_cursor: + sub_cursor.execute("SELECT schema_name FROM information_schema.schemata;") + for schema in sub_cursor.fetchall(): + schema_name = schema[0] + if schema_name.startswith("relation_id_") and schema_name.endswith( + "_schema" + ): + logger.info(f"Dropping schema {schema_name} created by the test") + sub_cursor.execute( + SQL("DROP SCHEMA {} CASCADE;").format(Identifier(schema_name)) + ) + sub_cursor.execute( + "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';" + ) + for table in sub_cursor.fetchall(): + table_name = table[0] + if table_name.startswith("test_table_"): + logger.info(f"Dropping table {table_name} created by the test") + sub_cursor.execute( + SQL("DROP TABLE public.{} CASCADE;").format( + Identifier(table_name) + ) + ) + finally: + if sub_connection is not None: + sub_connection.close() + logger.info(f"Databases to test: {databases}") + finally: + if cursor is not None: + cursor.close() + if connection is not None: + connection.close() + + sleep(90) + + data_integrator_apps = [ + app for app in juju.status().apps if app.startswith(DATA_INTEGRATOR_APP_NAME) + ] + for data_integrator_app_name in data_integrator_apps: + credentials = get_credentials(juju, f"{data_integrator_app_name}/0") + user = credentials["postgresql"]["username"] + password = credentials["postgresql"]["password"] + database = credentials["postgresql"]["database"] + config = juju.config(app=data_integrator_app_name) + logger.info(f"Config for {data_integrator_app_name}: {config}") + if data_integrator_app_name.endswith(ROLE_BACKUP.replace("_", "-")): + connection = None + try: + with db_connect(host, operator_password) as connection: + connection.autocommit = True + with connection.cursor() as cursor: + logger.info( + f"Granting {ROLE_BACKUP} role to {user} user to correctly check that role permissions" + ) + cursor.execute( + SQL("GRANT {} 
TO {};").format( + Identifier(ROLE_BACKUP), Identifier(user) + ) + ) + cursor.execute( + SQL("REVOKE {} FROM {};").format( + Identifier(f"charmed_{database}_dml"), Identifier(user) + ) + ) + cursor.execute( + SQL("REVOKE {} FROM {};").format( + Identifier(f"charmed_{database}_admin"), Identifier(user) + ) + ) + for system_database in ["postgres", "template1"]: + cursor.execute( + SQL("GRANT CONNECT ON DATABASE {} TO {};").format( + Identifier(system_database), Identifier(user) + ) + ) + finally: + if connection is not None: + connection.close() + + extra_user_roles = ROLE_BACKUP + elif data_integrator_app_name.endswith(ROLE_DBA.replace("_", "-")): + connection = None + try: + with db_connect(host, operator_password) as connection: + connection.autocommit = True + with connection.cursor() as cursor: + logger.info( + f"Granting {ROLE_DBA} role to {user} user to correctly check that role permissions" + ) + cursor.execute( + SQL("GRANT {} TO {};").format(Identifier(ROLE_DBA), Identifier(user)) + ) + cursor.execute( + SQL("REVOKE {} FROM {};").format( + Identifier(f"charmed_{database}_dml"), Identifier(user) + ) + ) + cursor.execute( + SQL("REVOKE {} FROM {};").format( + Identifier(f"charmed_{database}_admin"), Identifier(user) + ) + ) + for system_database in ["postgres", "template1"]: + cursor.execute( + SQL("GRANT CONNECT ON DATABASE {} TO {};").format( + Identifier(system_database), Identifier(user) + ) + ) + finally: + if connection is not None: + connection.close() + + extra_user_roles = ROLE_DBA + else: + extra_user_roles = config.get("extra-user-roles", "") + logger.info( + f"User is {user}, database is {database}, extra user roles are '{extra_user_roles}'" + ) + + sleep(90) + + attributes = roles_attributes(predefined_roles, extra_user_roles) + logger.info(f"Attributes for user {user}: '{attributes}'") + message_prefix = f"Checking that {user} user ({'with extra user roles: ' + extra_user_roles.replace(',', ', ') if extra_user_roles else 'without extra user 
roles'})" + for database_to_test in databases: + connection = None + cursor = None + operator_connection = None + operator_cursor = None + try: + connect_permission = attributes["permissions"]["connect"] + run_backup_commands_permission = attributes["permissions"]["run-backup-commands"] + set_user_permission = attributes["permissions"]["set-user"] + if ( + connect_permission == RoleAttributeValue.ALL_DATABASES + or ( + connect_permission == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or database_to_test == OTHER_DATABASE_NAME + ): + logger.info(f"{message_prefix} can connect to {database_to_test} database") + connection = db_connect(host, password, user=user, database=database_to_test) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.execute("SELECT current_database();") + assert cursor.fetchone()[0] == database_to_test + else: + logger.info(f"{message_prefix} can't connect to {database_to_test} database") + with pytest.raises(psycopg2.OperationalError): + db_connect(host, password, user=user, database=database_to_test) + + if connection is not None: + auto_escalate_to_database_owner = attributes["auto-escalate-to-database-owner"] + database_owner_user = f"charmed_{database_to_test}_owner" + with connection, connection.cursor() as cursor: + if ( + auto_escalate_to_database_owner == RoleAttributeValue.ALL_DATABASES + and database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ) or ( + auto_escalate_to_database_owner + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ): + logger.info( + f"{message_prefix} auto escalates to {database_owner_user}" + ) + check_connected_user(cursor, user, database_owner_user) + else: + logger.info( + f"{message_prefix} doesn't auto escalate to {database_owner_user}" + ) + check_connected_user(cursor, user, user) + + # Test escalation to the database owner user. 
+ escalate_to_database_owner_permission = attributes["permissions"][ + "escalate-to-database-owner" + ] + with connection.cursor() as cursor: + cursor.execute("SELECT current_user;") + previous_current_user = cursor.fetchone()[0] + cursor.execute("RESET ROLE;") + check_connected_user(cursor, user, user) + if ( + escalate_to_database_owner_permission == RoleAttributeValue.ALL_DATABASES + and database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ) or ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ): + logger.info(f"{message_prefix} can escalate to {database_owner_user}") + with connection.cursor() as cursor: + cursor.execute( + SQL("SET ROLE {};").format(Identifier(database_owner_user)) + ) + check_connected_user(cursor, user, database_owner_user) + elif ( + database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ): # Because there is not charmed_database_owner role in those databases. + logger.info(f"{message_prefix} can't escalate to {database_owner_user}") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute( + SQL("SET ROLE {};").format(Identifier(database_owner_user)) + ) + with connection.cursor() as cursor: + cursor.execute("SELECT current_user;") + current_user = cursor.fetchone()[0] + if current_user != previous_current_user: + cursor.execute( + SQL("SET ROLE {};").format(Identifier(previous_current_user)) + ) + + # Test objects creation. 
+ create_objects_permission = attributes["permissions"]["create-objects"] + schema_name = f"{user}_schema" + create_schema_statement = SQL("CREATE SCHEMA {};").format( + Identifier(schema_name) + ) + create_table_statement = SQL("CREATE TABLE {}.test_table(value TEXT);").format( + Identifier(schema_name) + ) + create_table_in_public_schema_statement = SQL( + "CREATE TABLE public.{}(value TEXT);" + ).format(Identifier(f"test_table_{user}")) + create_view_statement = SQL( + "CREATE VIEW {}.test_view AS SELECT * FROM {}.test_table;" + ).format(Identifier(schema_name), Identifier(schema_name)) + create_view_in_public_schema_statement = SQL( + "CREATE VIEW public.{} AS SELECT * FROM public.{};" + ).format(Identifier(f"test_view_{user}"), Identifier(f"test_table_{user}")) + if ( + ( + create_objects_permission == RoleAttributeValue.ALL_DATABASES + and database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ) + or ( + create_objects_permission == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or ( + escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + and database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ) + or ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + ): + with connection.cursor() as cursor: + if ( + ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + ) and auto_escalate_to_database_owner == RoleAttributeValue.NO: + cursor.execute( + SQL("SET ROLE {};").format(Identifier(database_owner_user)) + ) + logger.info(f"{message_prefix} can create schemas") + cursor.execute(create_schema_statement) + logger.info(f"{message_prefix} can create tables") + cursor.execute(create_table_statement) + logger.info(f"{message_prefix} can create tables in public schema") + 
cursor.execute(create_table_in_public_schema_statement) + logger.info(f"{message_prefix} can create view") + cursor.execute(create_view_statement) + logger.info(f"{message_prefix} can create views in public schema") + cursor.execute(create_view_in_public_schema_statement) + else: + operator_connection = db_connect( + host, operator_password, database=database_to_test + ) + operator_connection.autocommit = True + operator_cursor = operator_connection.cursor() + + logger.info(f"{message_prefix} can't create schemas") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_schema_statement) + operator_cursor.execute(create_schema_statement) + + logger.info(f"{message_prefix} can't create tables") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_table_statement) + operator_cursor.execute(create_table_statement) + + logger.info(f"{message_prefix} can't create tables in public schema") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_table_in_public_schema_statement) + operator_cursor.execute(create_table_in_public_schema_statement) + + logger.info(f"{message_prefix} can't create views") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_view_statement) + operator_cursor.execute(create_view_statement) + + logger.info(f"{message_prefix} can't create views in public schema") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_view_in_public_schema_statement) + operator_cursor.execute(create_view_in_public_schema_statement) + + operator_cursor.close() + operator_cursor = None + operator_connection.close() + operator_connection = None + + # Test write permissions. 
+ write_data_permission = attributes["permissions"]["write-data"] + insert_statement = SQL("INSERT INTO {}.test_table VALUES ('test');").format( + Identifier(schema_name) + ) + update_statement = SQL( + "UPDATE {}.test_table SET value = 'updated' WHERE value = 'test';" + ).format(Identifier(schema_name)) + delete_statement = SQL( + "DELETE FROM {}.test_table WHERE value = 'updated';" + ).format(Identifier(schema_name)) + insert_in_public_schema_statement = SQL( + "INSERT INTO public.{} VALUES ('test');" + ).format(Identifier(f"test_table_{user}")) + if ( + write_data_permission == RoleAttributeValue.ALL_DATABASES + or ( + write_data_permission == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + or ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + ): + with connection.cursor() as cursor: + logger.info( + f"{message_prefix} can write to tables in {schema_name} schema" + ) + if database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES and ( + ( + ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + ) + and auto_escalate_to_database_owner == RoleAttributeValue.NO + ): + cursor.execute( + SQL("SET ROLE {};").format(Identifier(database_owner_user)) + ) + cursor.execute(insert_statement) + cursor.execute(update_statement) + cursor.execute(delete_statement) + logger.info(f"{message_prefix} can write to tables in public schema") + cursor.execute(insert_in_public_schema_statement) + else: + logger.info( + f"{message_prefix} can't write to tables in {schema_name} schema" + ) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(insert_statement) + with ( + 
pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(update_statement) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(delete_statement) + logger.info(f"{message_prefix} can't write to tables in public schema") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(insert_in_public_schema_statement) + + # Test read permissions. + read_data_permission = attributes["permissions"]["read-data"] + select_statement = SQL("SELECT * FROM {}.test_table;").format( + Identifier(schema_name) + ) + select_in_public_schema_statement = SQL("SELECT * FROM public.{};").format( + Identifier(f"test_table_{user}") + ) + select_view_statement = SQL("SELECT * FROM {}.test_view;").format( + Identifier(schema_name) + ) + select_view_in_public_schema_statement = SQL( + "SELECT * FROM public.{};" + ).format(Identifier(f"test_view_{user}")) + if ( + read_data_permission == RoleAttributeValue.ALL_DATABASES + or ( + read_data_permission == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + or ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + ): + with connection.cursor() as cursor: + if database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES and ( + ( + ( + escalate_to_database_owner_permission + == RoleAttributeValue.REQUESTED_DATABASE + and database_to_test == database + ) + or escalate_to_database_owner_permission + == RoleAttributeValue.ALL_DATABASES + ) + and auto_escalate_to_database_owner == RoleAttributeValue.NO + ): + cursor.execute( + SQL("SET ROLE {};").format(Identifier(database_owner_user)) + ) + logger.info( + f"{message_prefix} can read from tables in {schema_name} schema" + ) + 
cursor.execute(select_statement) + logger.info(f"{message_prefix} can read from tables in public schema") + cursor.execute(select_in_public_schema_statement) + logger.info( + f"{message_prefix} can read from views in {schema_name} schema" + ) + cursor.execute(select_view_statement) + logger.info(f"{message_prefix} can read from views in public schema") + cursor.execute(select_view_in_public_schema_statement) + else: + logger.info( + f"{message_prefix} can't read from tables in {schema_name} schema" + ) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(select_statement) + logger.info(f"{message_prefix} can't read from tables in public schema") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(select_in_public_schema_statement) + logger.info( + f"{message_prefix} can't read from views in {schema_name} schema" + ) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(select_view_statement) + logger.info(f"{message_prefix} can't read from views in public schema") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(select_view_in_public_schema_statement) + + if attributes["permissions"]["read-stats"] == RoleAttributeValue.ALL_DATABASES: + logger.info(f"{message_prefix} can read stats") + with connection.cursor() as cursor: + cursor.execute("SELECT * FROM pg_stat_activity;") + else: + logger.info(f"{message_prefix} can't read stats") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute("SELECT * FROM pg_stat_activity;") + + checkpoint_command = "CHECKPOINT;" + backup_start_command = "SELECT pg_backup_start('test');" + backup_stop_command = "SELECT pg_backup_stop();" + create_restore_point_command = "SELECT 
pg_create_restore_point('test');" + switch_wal_command = "SELECT pg_switch_wal();" + if run_backup_commands_permission == RoleAttributeValue.YES: + logger.info(f"{message_prefix} can run checkpoint command") + with connection.cursor() as cursor: + cursor.execute(checkpoint_command) + else: + logger.info(f"{message_prefix} can't run checkpoint command") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(checkpoint_command) + + if run_backup_commands_permission == RoleAttributeValue.YES: + logger.info(f"{message_prefix} can run backup commands") + with connection.cursor() as cursor: + cursor.execute(backup_start_command) + cursor.execute(backup_stop_command) + cursor.execute(create_restore_point_command) + cursor.execute(switch_wal_command) + else: + logger.info(f"{message_prefix} can't run backup commands") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(backup_start_command) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(backup_stop_command) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(create_restore_point_command) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(switch_wal_command) + + if ( + set_user_permission == RoleAttributeValue.YES + and database_to_test not in NO_CATALOG_LEVEL_ROLES_DATABASES + ): + logger.info(f"{message_prefix} can call the set_user function") + with connection.cursor() as cursor: + cursor.execute("RESET ROLE;") + cursor.execute("SELECT set_user('rewind'::TEXT);") + check_connected_user(cursor, user, "rewind") + cursor.execute("SELECT reset_user();") + check_connected_user(cursor, user, user) + cursor.execute("SELECT set_user_u('operator'::TEXT);") + check_connected_user(cursor, user, 
"operator") + cursor.execute("SELECT reset_user();") + check_connected_user(cursor, user, user) + else: + logger.info(f"{message_prefix} can't call the set_user function") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute("RESET ROLE;") + cursor.execute("SELECT set_user('rewind'::TEXT);") + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute("RESET ROLE;") + cursor.execute("SELECT set_user_u('operator'::TEXT);") + + # Do the following operations only once. + if database_to_test == database: + # Test permission to call the set_up_predefined_catalog_roles function. + statement = "SELECT set_up_predefined_catalog_roles();" + if ( + attributes["permissions"]["set-up-predefined-catalog-roles"] + == RoleAttributeValue.YES + ): + logger.info( + f"{message_prefix} can call the set-up-predefined-catalog-roles function" + ) + with connection.cursor() as cursor: + cursor.execute( + SQL("SET ROLE {};").format(Identifier(ROLE_DATABASES_OWNER)) + ) + cursor.execute(statement) + else: + logger.info( + f"{message_prefix} can't call the set-up-predefined-catalog-roles function" + ) + with ( + pytest.raises(psycopg2.errors.InsufficientPrivilege), + connection.cursor() as cursor, + ): + cursor.execute(statement) + + # Test database creation, change and removal. 
+ cursor = connection.cursor() + new_database_name = f"{OTHER_DATABASE_NAME}-{user}" + create_database_statement = SQL("CREATE DATABASE {};").format( + Identifier(new_database_name) + ) + first_alter_database_statement = SQL( + "ALTER DATABASE {} RENAME TO {};" + ).format( + Identifier(new_database_name), Identifier(f"{new_database_name}-1") + ) + second_alter_database_statement = SQL( + "ALTER DATABASE {} RENAME TO {};" + ).format( + Identifier(f"{new_database_name}-1"), Identifier(new_database_name) + ) + first_drop_database_statement = SQL("DROP DATABASE {};").format( + Identifier(new_database_name) + ) + second_drop_database_statement = SQL("DROP DATABASE {};").format( + Identifier(OTHER_DATABASE_NAME) + ) + if attributes["permissions"]["create-databases"] == RoleAttributeValue.YES: + logger.info(f"{message_prefix} can create databases") + cursor.execute(create_database_statement) + logger.info(f"{message_prefix} can alter databases") + cursor.execute(first_alter_database_statement) + cursor.execute(second_alter_database_statement) + logger.info(f"{message_prefix} can drop databases owned by the user") + cursor.execute(first_drop_database_statement) + logger.info( + f"{message_prefix} can't drop databases not owned by the user" + ) + with pytest.raises(psycopg2.errors.InsufficientPrivilege): + cursor.execute(second_drop_database_statement) + else: + logger.info(f"{message_prefix} can't create databases") + with pytest.raises(psycopg2.errors.InsufficientPrivilege): + cursor.execute(create_database_statement) + + operator_connection = db_connect( + host, operator_password, database=database_to_test + ) + operator_connection.autocommit = True + operator_cursor = operator_connection.cursor() + operator_cursor.execute(create_database_statement) + operator_cursor.close() + operator_cursor = None + operator_connection.close() + operator_connection = None + + logger.info(f"{message_prefix} can't alter databases") + with 
pytest.raises(psycopg2.errors.InsufficientPrivilege): + cursor.execute(first_alter_database_statement) + logger.info(f"{message_prefix} can't drop databases") + with pytest.raises(psycopg2.errors.InsufficientPrivilege): + cursor.execute(first_drop_database_statement) + finally: + if cursor is not None: + cursor.close() + if connection is not None: + connection.close() + if operator_cursor is not None: + operator_cursor.close() + if operator_connection is not None: + operator_connection.close() diff --git a/tests/spread/test_predefined_roles.py/task.yaml b/tests/spread/test_predefined_roles.py/task.yaml new file mode 100644 index 0000000000..bbafb2a6bf --- /dev/null +++ b/tests/spread/test_predefined_roles.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_predefined_roles.py +environment: + TEST_MODULE: test_predefined_roles.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index e03bc8f6a9..88d668af64 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -653,7 +653,7 @@ def test_on_update_status_after_restore_operation(harness): ) as _handle_processes_failures, patch("charm.PostgreSQLBackups.can_use_s3_repository") as _can_use_s3_repository, patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL.get_current_timeline" + "single_kernel_postgresql.utils.postgresql.PostgreSQL.get_current_timeline" ) as _get_current_timeline, patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py deleted file mode 100644 index e76f949bbd..0000000000 --- a/tests/unit/test_postgresql.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. 
-from unittest.mock import call, patch - -import psycopg2 -import pytest -from charms.postgresql_k8s.v0.postgresql import ( - ACCESS_GROUP_INTERNAL, - ACCESS_GROUPS, - PERMISSIONS_GROUP_ADMIN, - PostgreSQLCreateDatabaseError, - PostgreSQLGetLastArchivedWALError, -) -from ops.testing import Harness -from psycopg2.sql import SQL, Composed, Identifier, Literal - -from charm import PostgresqlOperatorCharm -from constants import ( - BACKUP_USER, - MONITORING_USER, - PEER, - REPLICATION_USER, - REWIND_USER, - SYSTEM_USERS, - USER, -) - - -@pytest.fixture(autouse=True) -def harness(): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() - - -@pytest.mark.parametrize("users_exist", [True, False]) -def test_create_access_groups(harness, users_exist): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute - _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchone.return_value = ( - True if users_exist else None - ) - harness.charm.postgresql.create_access_groups() - calls = [ - *( - call( - Composed([ - SQL("SELECT TRUE FROM pg_roles WHERE rolname="), - Literal(group), - SQL(";"), - ]) - ) - for group in ACCESS_GROUPS - ) - ] - if not users_exist: - index = 1 - for group in ACCESS_GROUPS: - calls.insert(index, call(SQL("CREATE ROLE {} NOLOGIN;").format(Identifier(group)))) - index += 2 - execute.assert_has_calls(calls) - - -def test_create_database(harness): - with ( - patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL.enable_disable_extensions" - ) as _enable_disable_extensions, - patch( - 
"charms.postgresql_k8s.v0.postgresql.PostgreSQL._generate_database_privileges_statements" - ) as _generate_database_privileges_statements, - patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database, - ): - # Test a successful database creation. - database = "test_database" - user = "test_user" - plugins = ["test_plugin_1", "test_plugin_2"] - with harness.hooks_disabled(): - rel_id = harness.add_relation("database", "application") - harness.add_relation_unit(rel_id, "application/0") - harness.update_relation_data(rel_id, "application", {"database": database}) - database_relation = harness.model.get_relation("database") - client_relations = [database_relation] - schemas = [("test_schema_1",), ("test_schema_2",)] - _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchall.return_value = schemas - harness.charm.postgresql.create_database(database, user, plugins, client_relations) - execute = _connect_to_database.return_value.cursor.return_value.execute - execute.assert_has_calls([ - call( - Composed([ - SQL("REVOKE ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" FROM PUBLIC;"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(user), - SQL(";"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(PERMISSIONS_GROUP_ADMIN), - SQL(";"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(BACKUP_USER), - SQL(";"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(REPLICATION_USER), - SQL(";"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(REWIND_USER), - SQL(";"), - ]) - ), - call( 
- Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(USER), - SQL(";"), - ]) - ), - call( - Composed([ - SQL("GRANT ALL PRIVILEGES ON DATABASE "), - Identifier(database), - SQL(" TO "), - Identifier(MONITORING_USER), - SQL(";"), - ]) - ), - ]) - _generate_database_privileges_statements.assert_called_once_with( - 1, [schemas[0][0], schemas[1][0]], user - ) - _enable_disable_extensions.assert_called_once_with( - {plugins[0]: True, plugins[1]: True}, database - ) - - # Test when two relations request the same database. - _connect_to_database.reset_mock() - _generate_database_privileges_statements.reset_mock() - with harness.hooks_disabled(): - other_rel_id = harness.add_relation("database", "other-application") - harness.add_relation_unit(other_rel_id, "other-application/0") - harness.update_relation_data(other_rel_id, "other-application", {"database": database}) - other_database_relation = harness.model.get_relation("database", other_rel_id) - client_relations = [database_relation, other_database_relation] - harness.charm.postgresql.create_database(database, user, plugins, client_relations) - _generate_database_privileges_statements.assert_called_once_with( - 2, [schemas[0][0], schemas[1][0]], user - ) - - # Test a failed database creation. 
- _enable_disable_extensions.reset_mock() - execute.side_effect = psycopg2.Error - try: - harness.charm.postgresql.create_database(database, user, plugins, client_relations) - assert False - except PostgreSQLCreateDatabaseError: - pass - _enable_disable_extensions.assert_not_called() - - -def test_grant_internal_access_group_memberships(harness): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute - harness.charm.postgresql.grant_internal_access_group_memberships() - - internal_group = Identifier(ACCESS_GROUP_INTERNAL) - - execute.assert_has_calls([ - *( - call(SQL("GRANT {} TO {};").format(internal_group, Identifier(user))) - for user in SYSTEM_USERS - ), - ]) - - -def test_grant_relation_access_group_memberships(harness): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute - harness.charm.postgresql.grant_relation_access_group_memberships() - - execute.assert_has_calls([ - call( - "SELECT usename " - "FROM pg_catalog.pg_user " - "WHERE usename LIKE 'relation_id_%' OR usename LIKE 'relation-%' " - "OR usename LIKE 'pgbouncer_auth_relation_%' OR usename LIKE '%_user_%_%' " - "OR usename LIKE 'logical_replication_relation_%';" - ), - ]) - - -def test_generate_database_privileges_statements(harness): - # Test with only one established relation. 
- assert harness.charm.postgresql._generate_database_privileges_statements( - 1, ["test_schema_1", "test_schema_2"], "test_user" - ) == [ - Composed([ - SQL( - "DO $$\nDECLARE r RECORD;\nBEGIN\n FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '.\"' || tablename ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema')\nUNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '.\"' || sequence_name ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema')\nUNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'f'\nUNION SELECT 4 AS index,'ALTER PROCEDURE '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'p'\nUNION SELECT 5 AS index,'ALTER AGGREGATE '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema') AND p.prokind = 'a'\nUNION SELECT 6 AS index,'ALTER VIEW '|| schemaname || '.\"' || viewname ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 
'information_schema')) AS statements ORDER BY index) LOOP\n EXECUTE format(r.statement);\n END LOOP;\nEND; $$;" - ), - ]), - Composed([ - SQL( - "UPDATE pg_catalog.pg_largeobject_metadata\nSET lomowner = (SELECT oid FROM pg_roles WHERE rolname = " - ), - Literal("test_user"), - SQL(")\nWHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = "), - Literal("operator"), - SQL(");"), - ]), - Composed([ - SQL("ALTER SCHEMA "), - Identifier("test_schema_1"), - SQL(" OWNER TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("ALTER SCHEMA "), - Identifier("test_schema_2"), - SQL(" OWNER TO "), - Identifier("test_user"), - SQL(";"), - ]), - ] - # Test with multiple established relations. - assert harness.charm.postgresql._generate_database_privileges_statements( - 2, ["test_schema_1", "test_schema_2"], "test_user" - ) == [ - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT USAGE ON SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT CREATE ON SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA 
"), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT USAGE ON SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT CREATE ON SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - ] - - -def test_get_last_archived_wal(harness): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - # Test a successful call. - execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute - _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchone.return_value = ( - "000000010000000100000001", - ) - assert harness.charm.postgresql.get_last_archived_wal() == "000000010000000100000001" - execute.assert_called_once_with("SELECT last_archived_wal FROM pg_stat_archiver;") - - # Test a failed call. 
- execute.reset_mock() - execute.side_effect = psycopg2.Error - try: - harness.charm.postgresql.get_last_archived_wal() - assert False - except PostgreSQLGetLastArchivedWALError: - pass - execute.assert_called_once_with("SELECT last_archived_wal FROM pg_stat_archiver;") - - -def test_build_postgresql_group_map(harness): - assert harness.charm.postgresql.build_postgresql_group_map(None) == [] - assert harness.charm.postgresql.build_postgresql_group_map("ldap_group=admin") == [] - - for group in ACCESS_GROUPS: - assert harness.charm.postgresql.build_postgresql_group_map(f"ldap_group={group}") == [] - - mapping_1 = "ldap_group_1=psql_group_1" - mapping_2 = "ldap_group_2=psql_group_2" - - assert harness.charm.postgresql.build_postgresql_group_map(f"{mapping_1},{mapping_2}") == [ - ("ldap_group_1", "psql_group_1"), - ("ldap_group_2", "psql_group_2"), - ] - try: - harness.charm.postgresql.build_postgresql_group_map(f"{mapping_1} {mapping_2}") - assert False - except ValueError: - assert True - - -def test_build_postgresql_parameters(harness): - # Test when not limit is imposed to the available memory. 
- config_options = { - "durability_test_config_option_1": True, - "instance_test_config_option_2": False, - "logging_test_config_option_3": "on", - "memory_test_config_option_4": 1024, - "optimizer_test_config_option_5": "scheduled", - "other_test_config_option_6": "test-value", - "profile": "production", - "request_date_style": "ISO, DMY", - "request_time_zone": "UTC", - "request_test_config_option_7": "off", - "response_test_config_option_8": "partial", - "vacuum_test_config_option_9": 10.5, - } - assert harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) == { - "test_config_option_1": True, - "test_config_option_2": False, - "test_config_option_3": "on", - "test_config_option_4": 1024, - "test_config_option_5": "scheduled", - "test_config_option_7": "off", - "DateStyle": "ISO, DMY", - "TimeZone": "UTC", - "test_config_option_8": "partial", - "test_config_option_9": 10.5, - "shared_buffers": f"{250 * 128}", - "effective_cache_size": f"{750 * 128}", - } - - # Test with a limited imposed to the available memory. - parameters = harness.charm.postgresql.build_postgresql_parameters( - config_options, 1000000000, 600000000 - ) - assert parameters["shared_buffers"] == f"{150 * 128}" - assert parameters["effective_cache_size"] == f"{450 * 128}" - - # Test when the requested shared buffers are greater than 40% of the available memory. - config_options["memory_shared_buffers"] = 50001 - try: - harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - assert False - except AssertionError as e: - raise e - except Exception: - pass - - # Test when the requested shared buffers are lower than 40% of the available memory - # (also check that it's used when calculating the effective cache size value). 
- config_options["memory_shared_buffers"] = 50000 - parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - assert parameters["shared_buffers"] == 50000 - assert parameters["effective_cache_size"] == f"{600 * 128}" - - # Test when the profile is set to "testing". - config_options["profile"] = "testing" - parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - assert parameters["shared_buffers"] == 50000 - assert "effective_cache_size" not in parameters - - # Test when there is no shared_buffers value set in the config option. - del config_options["memory_shared_buffers"] - parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - assert "shared_buffers" not in parameters - assert "effective_cache_size" not in parameters - - -def test_configure_pgaudit(harness): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - # Test when pgAudit is enabled. - execute = ( - _connect_to_database.return_value.cursor.return_value.__enter__.return_value.execute - ) - harness.charm.postgresql._configure_pgaudit(True) - execute.assert_has_calls([ - call("ALTER SYSTEM SET pgaudit.log = 'ROLE,DDL,MISC,MISC_SET';"), - call("ALTER SYSTEM SET pgaudit.log_client TO off;"), - call("ALTER SYSTEM SET pgaudit.log_parameter TO off;"), - call("SELECT pg_reload_conf();"), - ]) - - # Test when pgAudit is disabled. 
- execute.reset_mock() - harness.charm.postgresql._configure_pgaudit(False) - execute.assert_has_calls([ - call("ALTER SYSTEM RESET pgaudit.log;"), - call("ALTER SYSTEM RESET pgaudit.log_client;"), - call("ALTER SYSTEM RESET pgaudit.log_parameter;"), - call("SELECT pg_reload_conf();"), - ]) - - -def test_validate_group_map(harness): - with patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" - ) as _connect_to_database: - execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute - _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchone.return_value = None - - query = SQL("SELECT TRUE FROM pg_roles WHERE rolname={};") - - assert harness.charm.postgresql.validate_group_map(None) is True - - assert harness.charm.postgresql.validate_group_map("") is False - assert harness.charm.postgresql.validate_group_map("ldap_group=") is False - execute.assert_has_calls([ - call(query.format(Literal(""))), - ]) - - assert harness.charm.postgresql.validate_group_map("ldap_group=admin") is True - assert harness.charm.postgresql.validate_group_map("ldap_group=admin,") is False - assert harness.charm.postgresql.validate_group_map("ldap_group admin") is False - - assert harness.charm.postgresql.validate_group_map("ldap_group=missing_group") is False - execute.assert_has_calls([ - call(query.format(Literal("missing_group"))), - ]) diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index 4c86d29622..a8d99cbaf8 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -4,16 +4,16 @@ from unittest.mock import Mock, PropertyMock, patch, sentinel import pytest -from charms.postgresql_k8s.v0.postgresql import ( +from ops import Unit +from ops.framework import EventBase +from ops.model import ActiveStatus, BlockedStatus +from ops.testing import Harness +from 
single_kernel_postgresql.utils.postgresql import ( ACCESS_GROUP_RELATION, PostgreSQLCreateDatabaseError, PostgreSQLCreateUserError, PostgreSQLGetPostgreSQLVersionError, ) -from ops import Unit -from ops.framework import EventBase -from ops.model import ActiveStatus, BlockedStatus -from ops.testing import Harness from charm import PostgresqlOperatorCharm from constants import PEER