diff --git a/linode_api4/groups/database.py b/linode_api4/groups/database.py index 8110ea88..5cb90ccf 100644 --- a/linode_api4/groups/database.py +++ b/linode_api4/groups/database.py @@ -1,3 +1,9 @@ +from typing import Any, Dict, Union + +from linode_api4 import ( + MySQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigOptions, +) from linode_api4.errors import UnexpectedResponseError from linode_api4.groups import Group from linode_api4.objects import ( @@ -63,6 +69,26 @@ def engines(self, *filters): """ return self.client._get_and_filter(DatabaseEngine, *filters) + def mysql_config_options(self): + """ + Returns a detailed list of all the configuration options for MySQL Databases. + + API Documentation: TODO + + :returns: The JSON configuration options for MySQL Databases. + """ + return self.client.get("databases/mysql/config", model=self) + + def postgresql_config_options(self): + """ + Returns a detailed list of all the configuration options for PostgreSQL Databases. + + API Documentation: TODO + + :returns: The JSON configuration options for PostgreSQL Databases. + """ + return self.client.get("databases/postgresql/config", model=self) + def instances(self, *filters): """ Returns a list of Managed Databases active on this account. @@ -93,7 +119,15 @@ def mysql_instances(self, *filters): """ return self.client._get_and_filter(MySQLDatabase, *filters) - def mysql_create(self, label, region, engine, ltype, **kwargs): + def mysql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[MySQLDatabaseConfigOptions, Dict[str, Any]] = None, + **kwargs, + ): """ Creates an :any:`MySQLDatabase` on this account with the given label, region, engine, and node type. 
For example:: @@ -123,6 +157,8 @@ def mysql_create(self, label, region, engine, ltype, **kwargs): :type engine: str or Engine :param ltype: The Linode Type to use for this cluster :type ltype: str or Type + :param engine_config: The configuration options for this MySQL cluster + :type engine_config: Dict[str, Any] or MySQLDatabaseConfigOptions """ params = { @@ -130,6 +166,7 @@ def mysql_create(self, label, region, engine, ltype, **kwargs): "region": region, "engine": engine, "type": ltype, + "engine_config": engine_config, } params.update(kwargs) @@ -216,7 +253,17 @@ def postgresql_instances(self, *filters): """ return self.client._get_and_filter(PostgreSQLDatabase, *filters) - def postgresql_create(self, label, region, engine, ltype, **kwargs): + def postgresql_create( + self, + label, + region, + engine, + ltype, + engine_config: Union[ + PostgreSQLDatabaseConfigOptions, Dict[str, Any] + ] = None, + **kwargs, + ): """ Creates an :any:`PostgreSQLDatabase` on this account with the given label, region, engine, and node type. 
For example:: @@ -246,6 +293,8 @@ def postgresql_create(self, label, region, engine, ltype, **kwargs): :type engine: str or Engine :param ltype: The Linode Type to use for this cluster :type ltype: str or Type + :param engine_config: The configuration options for this PostgreSQL cluster + :type engine_config: Dict[str, Any] or PostgreSQLDatabaseConfigOptions """ params = { @@ -253,6 +302,7 @@ def postgresql_create(self, label, region, engine, ltype, **kwargs): "region": region, "engine": engine, "type": ltype, + "engine_config": engine_config, } params.update(kwargs) diff --git a/linode_api4/objects/database.py b/linode_api4/objects/database.py index dc9db847..39249bbf 100644 --- a/linode_api4/objects/database.py +++ b/linode_api4/objects/database.py @@ -1,6 +1,15 @@ +from dataclasses import dataclass, field +from typing import Optional + from deprecated import deprecated -from linode_api4.objects import Base, DerivedBase, MappedObject, Property +from linode_api4.objects import ( + Base, + DerivedBase, + JSONObject, + MappedObject, + Property, +) class DatabaseType(Base): @@ -128,6 +137,140 @@ class PostgreSQLDatabaseBackup(DatabaseBackup): api_endpoint = "/databases/postgresql/instances/{database_id}/backups/{id}" +@dataclass +class MySQLDatabaseConfigMySQLOptions(JSONObject): + """ + MySQLDatabaseConfigMySQLOptions represents the fields in the mysql + field of the MySQLDatabaseConfigOptions class + """ + + connect_timeout: Optional[int] = None + default_time_zone: Optional[str] = None + group_concat_max_len: Optional[float] = None + information_schema_stats_expiry: Optional[int] = None + innodb_change_buffer_max_size: Optional[int] = None + innodb_flush_neighbors: Optional[int] = None + innodb_ft_min_token_size: Optional[int] = None + innodb_ft_server_stopword_table: Optional[str] = None + innodb_lock_wait_timeout: Optional[int] = None + innodb_log_buffer_size: Optional[int] = None + innodb_online_alter_log_max_size: Optional[int] = None + innodb_read_io_threads: 
Optional[int] = None + innodb_rollback_on_timeout: Optional[bool] = None + innodb_thread_concurrency: Optional[int] = None + innodb_write_io_threads: Optional[int] = None + interactive_timeout: Optional[int] = None + internal_tmp_mem_storage_engine: Optional[str] = None + max_allowed_packet: Optional[int] = None + max_heap_table_size: Optional[int] = None + net_buffer_length: Optional[int] = None + net_read_timeout: Optional[int] = None + net_write_timeout: Optional[int] = None + sort_buffer_size: Optional[int] = None + sql_mode: Optional[str] = None + sql_require_primary_key: Optional[bool] = None + tmp_table_size: Optional[int] = None + wait_timeout: Optional[int] = None + + +@dataclass +class MySQLDatabaseConfigOptions(JSONObject): + """ + MySQLDatabaseConfigOptions is used to specify + a MySQL Database Cluster's configuration options during its creation. + """ + + mysql: Optional[MySQLDatabaseConfigMySQLOptions] = None + binlog_retention_period: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigPGLookoutOptions(JSONObject): + """ + PostgreSQLDatabaseConfigPGLookoutOptions represents the fields in the pglookout + field of the PostgreSQLDatabaseConfigOptions class + """ + + max_failover_replication_time_lag: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigPGOptions(JSONObject): + """ + PostgreSQLDatabaseConfigPGOptions represents the fields in the pg + field of the PostgreSQLDatabaseConfigOptions class + """ + + autovacuum_analyze_scale_factor: Optional[float] = None + autovacuum_analyze_threshold: Optional[int] = None + autovacuum_max_workers: Optional[int] = None + autovacuum_naptime: Optional[int] = None + autovacuum_vacuum_cost_delay: Optional[int] = None + autovacuum_vacuum_cost_limit: Optional[int] = None + autovacuum_vacuum_scale_factor: Optional[float] = None + autovacuum_vacuum_threshold: Optional[int] = None + bgwriter_delay: Optional[int] = None + bgwriter_flush_after: Optional[int] = None + 
bgwriter_lru_maxpages: Optional[int] = None + bgwriter_lru_multiplier: Optional[float] = None + deadlock_timeout: Optional[int] = None + default_toast_compression: Optional[str] = None + idle_in_transaction_session_timeout: Optional[int] = None + jit: Optional[bool] = None + max_files_per_process: Optional[int] = None + max_locks_per_transaction: Optional[int] = None + max_logical_replication_workers: Optional[int] = None + max_parallel_workers: Optional[int] = None + max_parallel_workers_per_gather: Optional[int] = None + max_pred_locks_per_transaction: Optional[int] = None + max_replication_slots: Optional[int] = None + max_slot_wal_keep_size: Optional[int] = None + max_stack_depth: Optional[int] = None + max_standby_archive_delay: Optional[int] = None + max_standby_streaming_delay: Optional[int] = None + max_wal_senders: Optional[int] = None + max_worker_processes: Optional[int] = None + password_encryption: Optional[str] = None + pg_partman_bgw_interval: Optional[int] = field( + default=None, metadata={"json_key": "pg_partman_bgw.interval"} + ) + pg_partman_bgw_role: Optional[str] = field( + default=None, metadata={"json_key": "pg_partman_bgw.role"} + ) + pg_stat_monitor_pgsm_enable_query_plan: Optional[bool] = field( + default=None, + metadata={"json_key": "pg_stat_monitor.pgsm_enable_query_plan"}, + ) + pg_stat_monitor_pgsm_max_buckets: Optional[int] = field( + default=None, metadata={"json_key": "pg_stat_monitor.pgsm_max_buckets"} + ) + pg_stat_statements_track: Optional[str] = field( + default=None, metadata={"json_key": "pg_stat_statements.track"} + ) + temp_file_limit: Optional[int] = None + timezone: Optional[str] = None + track_activity_query_size: Optional[int] = None + track_commit_timestamp: Optional[str] = None + track_functions: Optional[str] = None + track_io_timing: Optional[str] = None + wal_sender_timeout: Optional[int] = None + wal_writer_delay: Optional[int] = None + + +@dataclass +class PostgreSQLDatabaseConfigOptions(JSONObject): + """ + 
PostgreSQLDatabaseConfigOptions is used to specify + a PostgreSQL Database Cluster's configuration options during its creation. + """ + + pg: Optional[PostgreSQLDatabaseConfigPGOptions] = None + pg_stat_monitor_enable: Optional[bool] = None + pglookout: Optional[PostgreSQLDatabaseConfigPGLookoutOptions] = None + shared_buffers_percentage: Optional[float] = None + work_mem: Optional[int] = None + + class MySQLDatabase(Base): """ An accessible Managed MySQL Database. @@ -158,6 +301,9 @@ class MySQLDatabase(Base): "updated": Property(volatile=True, is_datetime=True), "updates": Property(mutable=True), "version": Property(), + "engine_config": Property( + mutable=True, json_object=MySQLDatabaseConfigOptions + ), } @property @@ -321,6 +467,9 @@ class PostgreSQLDatabase(Base): "updated": Property(volatile=True, is_datetime=True), "updates": Property(mutable=True), "version": Property(), + "engine_config": Property( + mutable=True, json_object=PostgreSQLDatabaseConfigOptions + ), } @property diff --git a/linode_api4/objects/serializable.py b/linode_api4/objects/serializable.py index fea682f4..06328033 100644 --- a/linode_api4/objects/serializable.py +++ b/linode_api4/objects/serializable.py @@ -1,5 +1,6 @@ import inspect from dataclasses import dataclass +from dataclasses import fields as dataclass_fields from enum import Enum from types import SimpleNamespace from typing import ( @@ -140,7 +141,7 @@ def _parse_attr(cls, json_value: Any, field_type: type): @classmethod def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]: """ - Creates an instance of this class from a JSON dict. + Creates an instance of this class from a JSON dict, respecting json_key metadata. 
""" if json is None: return None @@ -149,8 +150,12 @@ def from_json(cls, json: Dict[str, Any]) -> Optional["JSONObject"]: type_hints = get_type_hints(cls) - for k in vars(obj): - setattr(obj, k, cls._parse_attr(json.get(k), type_hints.get(k))) + for f in dataclass_fields(cls): + json_key = f.metadata.get("json_key", f.name) + field_type = type_hints.get(f.name) + value = json.get(json_key) + parsed_value = cls._parse_attr(value, field_type) + setattr(obj, f.name, parsed_value) return obj @@ -193,7 +198,11 @@ def should_include(key: str, value: Any) -> bool: result = {} - for k, v in vars(self).items(): + for f in dataclass_fields(self): + k = f.name + json_key = f.metadata.get("json_key", k) + v = getattr(self, k) + if not should_include(k, v): continue @@ -204,7 +213,7 @@ def should_include(key: str, value: Any) -> bool: else: v = attempt_serialize(v) - result[k] = v + result[json_key] = v return result diff --git a/test/fixtures/databases_mysql_config.json b/test/fixtures/databases_mysql_config.json new file mode 100644 index 00000000..9cba0afd --- /dev/null +++ b/test/fixtures/databases_mysql_config.json @@ -0,0 +1,230 @@ +{ + "mysql": { + "connect_timeout": { + "description": "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + "example": 10, + "maximum": 3600, + "minimum": 2, + "requires_restart": false, + "type": "integer" + }, + "default_time_zone": { + "description": "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + "example": "+03:00", + "maxLength": 100, + "minLength": 2, + "pattern": "^([-+][\\d:]*|[\\w/]*)$", + "requires_restart": false, + "type": "string" + }, + "group_concat_max_len": { + "description": "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + "example": 1024, + "maximum": 18446744073709551600, + "minimum": 4, + "requires_restart": false, + "type": "integer" + 
}, + "information_schema_stats_expiry": { + "description": "The time, in seconds, before cached statistics expire", + "example": 86400, + "maximum": 31536000, + "minimum": 900, + "requires_restart": false, + "type": "integer" + }, + "innodb_change_buffer_max_size": { + "description": "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + "example": 30, + "maximum": 50, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_flush_neighbors": { + "description": "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + "example": 0, + "maximum": 2, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_ft_min_token_size": { + "description": "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + "example": 3, + "maximum": 16, + "minimum": 0, + "requires_restart": true, + "type": "integer" + }, + "innodb_ft_server_stopword_table": { + "description": "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + "example": "db_name/table_name", + "maxLength": 1024, + "pattern": "^.+/.+$", + "requires_restart": false, + "type": [ + "null", + "string" + ] + }, + "innodb_lock_wait_timeout": { + "description": "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. 
Default is 120.", + "example": 50, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "innodb_log_buffer_size": { + "description": "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + "example": 16777216, + "maximum": 4294967295, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "innodb_online_alter_log_max_size": { + "description": "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + "example": 134217728, + "maximum": 1099511627776, + "minimum": 65536, + "requires_restart": false, + "type": "integer" + }, + "innodb_read_io_threads": { + "description": "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "innodb_rollback_on_timeout": { + "description": "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + "example": true, + "requires_restart": true, + "type": "boolean" + }, + "innodb_thread_concurrency": { + "description": "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + "example": 10, + "maximum": 1000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_write_io_threads": { + "description": "The number of I/O threads for write operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "interactive_timeout": { + "description": "The number of seconds the server waits for activity on an interactive connection before closing it.", + "example": 3600, + "maximum": 604800, + "minimum": 30, + "requires_restart": false, + "type": "integer" + }, + "internal_tmp_mem_storage_engine": { + "description": "The storage engine for in-memory internal temporary tables.", + "enum": [ + "TempTable", + "MEMORY" + ], + "example": "TempTable", + "requires_restart": false, + "type": "string" + }, + "max_allowed_packet": { + "description": "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + "example": 67108864, + "maximum": 1073741824, + "minimum": 102400, + "requires_restart": false, + "type": "integer" + }, + "max_heap_table_size": { + "description": "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "net_buffer_length": { + "description": "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", + "example": 16384, + "maximum": 1048576, + "minimum": 1024, + "requires_restart": true, + "type": "integer" + }, + "net_read_timeout": { + "description": "The number of seconds to wait for more data from a connection before aborting the read.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "net_write_timeout": { + "description": "The number of seconds to wait for a block to be written to a connection before aborting the write.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "sort_buffer_size": { + "description": "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + "example": 262144, + "maximum": 1073741824, + "minimum": 32768, + "requires_restart": false, + "type": "integer" + }, + "sql_mode": { + "description": "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + "example": "ANSI,TRADITIONAL", + "maxLength": 1024, + "pattern": "^[A-Z_]*(,[A-Z_]+)*$", + "requires_restart": false, + "type": "string" + }, + "sql_require_primary_key": { + "description": "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "tmp_table_size": { + "description": "Limits the size of internal in-memory tables. Also set max_heap_table_size. 
Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "wait_timeout": { + "description": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + "example": 28800, + "maximum": 2147483, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } + }, + "binlog_retention_period": { + "description": "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + "example": 600, + "maximum": 86400, + "minimum": 600, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_mysql_instances.json b/test/fixtures/databases_mysql_instances.json index 2ea73ddc..d6e3f2e6 100644 --- a/test/fixtures/databases_mysql_instances.json +++ b/test/fixtures/databases_mysql_instances.json @@ -29,7 +29,39 @@ "hour_of_day": 0, "week_of_month": null }, - "version": "8.0.26" + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "mysql": { + "connect_timeout": 10, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + "innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + "net_read_timeout": 
30, + "net_write_timeout": 30, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + } } ], "page": 1, diff --git a/test/fixtures/databases_postgresql_config.json b/test/fixtures/databases_postgresql_config.json new file mode 100644 index 00000000..9a93d0aa --- /dev/null +++ b/test/fixtures/databases_postgresql_config.json @@ -0,0 +1,367 @@ +{ + "pg": { + "autovacuum_analyze_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_analyze_threshold": { + "description": "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_max_workers": { + "description": "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + "maximum": 20, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_naptime": { + "description": "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + "maximum": 86400, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_delay": { + "description": "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. 
The default value is 20 milliseconds", + "maximum": 100, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_limit": { + "description": "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + "maximum": 10000, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_vacuum_threshold": { + "description": "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_delay": { + "description": "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + "example": 200, + "maximum": 10000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_flush_after": { + "description": "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + "example": 512, + "maximum": 2048, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_maxpages": { + "description": "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. 
Default is 100.", + "example": 100, + "maximum": 1073741823, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_multiplier": { + "description": "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a \u201cjust in time\u201d policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + "example": 2.0, + "maximum": 10, + "minimum": 0, + "requires_restart": false, + "type": "number" + }, + "deadlock_timeout": { + "description": "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + "example": 1000, + "maximum": 1800000, + "minimum": 500, + "requires_restart": false, + "type": "integer" + }, + "default_toast_compression": { + "description": "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + "enum": [ + "lz4", + "pglz" + ], + "example": "lz4", + "requires_restart": false, + "type": "string" + }, + "idle_in_transaction_session_timeout": { + "description": "Time out sessions with open transactions after this number of milliseconds", + "maximum": 604800000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "jit": { + "description": "Controls system-wide use of Just-in-Time Compilation (JIT).", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "max_files_per_process": { + "description": "PostgreSQL maximum number of files that can be open per process", + "maximum": 4096, + "minimum": 1000, + "requires_restart": false, + "type": "integer" + }, + "max_locks_per_transaction": { + "description": "PostgreSQL maximum locks per transaction", + 
"maximum": 6400, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_logical_replication_workers": { + "description": "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + "maximum": 64, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers": { + "description": "Sets the maximum number of workers that the system can support for parallel queries", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers_per_gather": { + "description": "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_pred_locks_per_transaction": { + "description": "PostgreSQL maximum predicate locks per transaction", + "maximum": 5120, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_replication_slots": { + "description": "PostgreSQL maximum replication slots", + "maximum": 64, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "max_slot_wal_keep_size": { + "description": "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "max_stack_depth": { + "description": "Maximum depth of the stack in bytes", + "maximum": 6291456, + "minimum": 2097152, + "requires_restart": false, + "type": "integer" + }, + "max_standby_archive_delay": { + "description": "Max standby archive delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_standby_streaming_delay": { + "description": "Max standby streaming delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_wal_senders": { + "description": "PostgreSQL maximum WAL senders", + "maximum": 64, + "minimum": 20, + "requires_restart": false, + "type": "integer" + }, + "max_worker_processes": { + "description": "Sets the maximum number of background processes that the system can support", + "maximum": 96, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "password_encryption": { + "description": "Chooses the algorithm for encrypting passwords.", + "enum": [ + "md5", + "scram-sha-256" + ], + "example": "scram-sha-256", + "requires_restart": false, + "type": [ + "string", + "null" + ] + }, + "pg_partman_bgw.interval": { + "description": "Sets the time interval to run pg_partman's scheduled tasks", + "example": 3600, + "maximum": 604800, + "minimum": 3600, + "requires_restart": false, + "type": "integer" + }, + "pg_partman_bgw.role": { + "description": "Controls which role to use for pg_partman's scheduled background tasks.", + "example": "myrolename", + "maxLength": 64, + "pattern": "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + "requires_restart": false, + "type": "string" + }, + "pg_stat_monitor.pgsm_enable_query_plan": { + "description": "Enables or disables query plan monitoring", + "example": false, + "requires_restart": false, + "type": 
"boolean" + }, + "pg_stat_monitor.pgsm_max_buckets": { + "description": "Sets the maximum number of buckets", + "example": 10, + "maximum": 10, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "pg_stat_statements.track": { + "description": "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + "enum": [ + "all", + "top", + "none" + ], + "requires_restart": false, + "type": [ + "string" + ] + }, + "temp_file_limit": { + "description": "PostgreSQL temporary file limit in KiB, -1 for unlimited", + "example": 5000000, + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "timezone": { + "description": "PostgreSQL service timezone", + "example": "Europe/Helsinki", + "maxLength": 64, + "pattern": "^[\\w/]*$", + "requires_restart": false, + "type": "string" + }, + "track_activity_query_size": { + "description": "Specifies the number of bytes reserved to track the currently executing command for each active session.", + "example": 1024, + "maximum": 10240, + "minimum": 1024, + "requires_restart": false, + "type": "integer" + }, + "track_commit_timestamp": { + "description": "Record commit time of transactions.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "track_functions": { + "description": "Enables tracking of function call counts and time used.", + "enum": [ + "all", + "pl", + "none" + ], + "requires_restart": false, + "type": "string" + }, + "track_io_timing": { + "description": "Enables timing of database I/O calls. 
This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "wal_sender_timeout": { + "description": "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + "example": 60000, + "requires_restart": false, + "type": "integer" + }, + "wal_writer_delay": { + "description": "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance", + "example": 50, + "maximum": 200, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "pg_stat_monitor_enable": { + "description": "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + "requires_restart": true, + "type": "boolean" + }, + "pglookout": { + "max_failover_replication_time_lag": { + "description": "Number of seconds of master unavailability before triggering database failover to standby", + "maximum": 9223372036854775000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "shared_buffers_percentage": { + "description": "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + "example": 41.5, + "maximum": 60.0, + "minimum": 20.0, + "requires_restart": false, + "type": "number" + }, + "work_mem": { + "description": "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. 
Default is 1MB + 0.075% of total RAM (up to 32MB).", + "example": 4, + "maximum": 1024, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/fixtures/databases_postgresql_instances.json b/test/fixtures/databases_postgresql_instances.json index 2740b836..92d5ce94 100644 --- a/test/fixtures/databases_postgresql_instances.json +++ b/test/fixtures/databases_postgresql_instances.json @@ -30,7 +30,60 @@ "hour_of_day": 0, "week_of_month": null }, - "version": "13.2" + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + "autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + "max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + "pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + 
"track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "shared_buffers_percentage": 41.5, + "work_mem": 4 + } } ], "page": 1, diff --git a/test/integration/models/database/helpers.py b/test/integration/models/database/helpers.py new file mode 100644 index 00000000..936425f5 --- /dev/null +++ b/test/integration/models/database/helpers.py @@ -0,0 +1,116 @@ +from linode_api4 import LinodeClient +from linode_api4.objects import ( + MySQLDatabase, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) + + +# Test Helpers +def get_db_engine_id(client: LinodeClient, engine: str): + engines = client.database.engines() + engine_id = "" + for e in engines: + if e.engine == engine: + engine_id = e.id + + return str(engine_id) + + +def get_sql_db_status(client: LinodeClient, db_id, status: str): + db = client.load(MySQLDatabase, db_id) + return db.status == status + + +def get_postgres_db_status(client: LinodeClient, db_id, status: str): + db = client.load(PostgreSQLDatabase, db_id) + return db.status == status + + +def make_full_mysql_engine_config(): + return MySQLDatabaseConfigOptions( + binlog_retention_period=600, + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20, + default_time_zone="+00:00", + group_concat_max_len=1024, + information_schema_stats_expiry=900, + innodb_change_buffer_max_size=25, + innodb_flush_neighbors=1, + innodb_ft_min_token_size=3, + innodb_ft_server_stopword_table="db_name/table_name", + innodb_lock_wait_timeout=50, + innodb_log_buffer_size=16777216, + innodb_online_alter_log_max_size=134217728, + innodb_read_io_threads=4, + innodb_rollback_on_timeout=True, + innodb_thread_concurrency=8, + innodb_write_io_threads=4, + interactive_timeout=300, + internal_tmp_mem_storage_engine="TempTable", + 
max_allowed_packet=67108864, + max_heap_table_size=16777216, + net_buffer_length=16384, + net_read_timeout=30, + net_write_timeout=60, + sort_buffer_size=262144, + sql_mode="TRADITIONAL", + sql_require_primary_key=False, + tmp_table_size=16777216, + wait_timeout=28800, + ), + ) + + +def make_full_postgres_engine_config(): + return PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.1, + autovacuum_analyze_threshold=50, + autovacuum_max_workers=3, + autovacuum_naptime=60, + autovacuum_vacuum_cost_delay=20, + autovacuum_vacuum_cost_limit=200, + autovacuum_vacuum_scale_factor=0.2, + autovacuum_vacuum_threshold=50, + bgwriter_delay=200, + bgwriter_flush_after=64, + bgwriter_lru_maxpages=100, + bgwriter_lru_multiplier=2.0, + deadlock_timeout=1000, + default_toast_compression="lz4", + idle_in_transaction_session_timeout=600000, + jit=True, + max_files_per_process=1000, + max_locks_per_transaction=64, + max_logical_replication_workers=4, + max_parallel_workers=4, + max_parallel_workers_per_gather=2, + max_pred_locks_per_transaction=64, + max_replication_slots=10, + max_slot_wal_keep_size=2048, + max_stack_depth=6291456, + max_standby_archive_delay=30000, + max_standby_streaming_delay=30000, + max_wal_senders=20, + max_worker_processes=8, + password_encryption="scram-sha-256", + temp_file_limit=1, + timezone="UTC", + track_activity_query_size=2048, + track_functions="all", + wal_sender_timeout=60000, + wal_writer_delay=200, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=True, + pg_stat_monitor_pgsm_max_buckets=2, + pg_stat_statements_track="top", + ), + pg_stat_monitor_enable=True, + shared_buffers_percentage=25.0, + work_mem=1024, + ) diff --git a/test/integration/models/database/test_database.py b/test/integration/models/database/test_database.py index 351c09c2..dbb763c5 100644 --- a/test/integration/models/database/test_database.py +++ 
b/test/integration/models/database/test_database.py @@ -5,34 +5,17 @@ send_request_when_resource_available, wait_for_condition, ) +from test.integration.models.database.helpers import ( + get_db_engine_id, + get_postgres_db_status, + get_sql_db_status, +) import pytest -from linode_api4 import LinodeClient from linode_api4.objects import MySQLDatabase, PostgreSQLDatabase -# Test Helpers -def get_db_engine_id(client: LinodeClient, engine: str): - engines = client.database.engines() - engine_id = "" - for e in engines: - if e.engine == engine: - engine_id = e.id - - return str(engine_id) - - -def get_sql_db_status(client: LinodeClient, db_id, status: str): - db = client.load(MySQLDatabase, db_id) - return db.status == status - - -def get_postgres_db_status(client: LinodeClient, db_id, status: str): - db = client.load(PostgreSQLDatabase, db_id) - return db.status == status - - @pytest.fixture(scope="session") def test_create_sql_db(test_linode_client): client = test_linode_client diff --git a/test/integration/models/database/test_database_engine_config.py b/test/integration/models/database/test_database_engine_config.py new file mode 100644 index 00000000..cb366f03 --- /dev/null +++ b/test/integration/models/database/test_database_engine_config.py @@ -0,0 +1,423 @@ +import os +from test.integration.helpers import ( + get_test_label, + send_request_when_resource_available, + wait_for_condition, +) +from test.integration.models.database.helpers import ( + get_db_engine_id, + get_postgres_db_status, + get_sql_db_status, + make_full_mysql_engine_config, + make_full_postgres_engine_config, +) + +import pytest + +from linode_api4.errors import ApiError +from linode_api4.objects import ( + MySQLDatabase, + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) + + +@pytest.fixture(scope="session") +def mysql_db_with_engine_config(test_linode_client): + client = 
test_linode_client + label = get_test_label() + "-sqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "mysql") + dbtype = "g6-standard-1" + + db = client.database.mysql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_full_mysql_engine_config(), + ) + + def get_db_status(): + return db.status == "active" + + # Usually takes 10-15m to provision + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +@pytest.fixture(scope="session") +def postgres_db_with_engine_config(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = "postgresql/17" + dbtype = "g6-standard-1" + + db = client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=make_full_postgres_engine_config(), + ) + + def get_db_status(): + return db.status == "active" + + # Usually takes 10-15m to provision + wait_for_condition(60, 2000, get_db_status) + + yield db + + send_request_when_resource_available(300, db.delete) + + +# MYSQL +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_mysql_config(test_linode_client): + config = test_linode_client.database.mysql_config_options() + + # Top-level keys + assert "binlog_retention_period" in config + assert "mysql" in config + + # binlog_retention_period checks + brp = config["binlog_retention_period"] + assert isinstance(brp, dict) + assert brp["type"] == "integer" + assert brp["minimum"] == 600 + assert brp["maximum"] == 86400 + assert brp["requires_restart"] is False + + # mysql sub-keys + mysql = config["mysql"] + + # mysql valid fields + expected_keys = [ + "connect_timeout", + "default_time_zone", + 
"group_concat_max_len", + "information_schema_stats_expiry", + "innodb_change_buffer_max_size", + "innodb_flush_neighbors", + "innodb_ft_min_token_size", + "innodb_ft_server_stopword_table", + "innodb_lock_wait_timeout", + "innodb_log_buffer_size", + "innodb_online_alter_log_max_size", + "innodb_read_io_threads", + "innodb_rollback_on_timeout", + "innodb_thread_concurrency", + "innodb_write_io_threads", + "interactive_timeout", + "internal_tmp_mem_storage_engine", + "max_allowed_packet", + "max_heap_table_size", + "net_buffer_length", + "net_read_timeout", + "net_write_timeout", + "sort_buffer_size", + "sql_mode", + "sql_require_primary_key", + "tmp_table_size", + "wait_timeout", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in mysql, f"{key} not found in mysql config" + + assert mysql["connect_timeout"]["type"] == "integer" + assert mysql["default_time_zone"]["type"] == "string" + assert mysql["innodb_rollback_on_timeout"]["type"] == "boolean" + assert "enum" in mysql["internal_tmp_mem_storage_engine"] + assert "pattern" in mysql["sql_mode"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_mysql_with_engine_config(mysql_db_with_engine_config): + db = mysql_db_with_engine_config + actual_config = db.engine_config.mysql + expected_config = make_full_mysql_engine_config().mysql.__dict__ + + for key, expected_value in expected_config.items(): + actual_value = getattr(actual_config, key) + assert ( + actual_value == expected_value + ), f"{key} mismatch: expected {expected_value}, got {actual_value}" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_mysql_engine_config( + test_linode_client, 
mysql_db_with_engine_config +): + db = mysql_db_with_engine_config + + db.updates.day_of_week = 2 + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=50), + binlog_retention_period=880, + ) + + db.save() + + wait_for_condition( + 30, + 300, + get_sql_db_status, + test_linode_client, + db.id, + "active", + ) + + database = test_linode_client.load(MySQLDatabase, db.id) + + assert database.updates.day_of_week == 2 + assert database.engine_config.mysql.connect_timeout == 50 + assert database.engine_config.binlog_retention_period == 880 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_list_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + dbs = test_linode_client.database.mysql_instances() + + db_ids = [db.id for db in dbs] + + assert mysql_db_with_engine_config.id in db_ids + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_mysql_engine_config( + test_linode_client, mysql_db_with_engine_config +): + db = test_linode_client.load(MySQLDatabase, mysql_db_with_engine_config.id) + + assert isinstance(db, MySQLDatabase) + + +# POSTGRESQL +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_config(test_linode_client): + config = test_linode_client.database.postgresql_config_options() + + # Top-level keys and structure + assert "pg" in config + + assert "pg_stat_monitor_enable" in config + assert config["pg_stat_monitor_enable"]["type"] == "boolean" + + assert "shared_buffers_percentage" in config + assert 
config["shared_buffers_percentage"]["type"] == "number" + assert config["shared_buffers_percentage"]["minimum"] >= 1 + + assert "work_mem" in config + assert config["work_mem"]["type"] == "integer" + assert "minimum" in config["work_mem"] + + pg = config["pg"] + + # postgres valid fields + expected_keys = [ + "autovacuum_analyze_scale_factor", + "autovacuum_analyze_threshold", + "autovacuum_max_workers", + "autovacuum_naptime", + "autovacuum_vacuum_cost_delay", + "autovacuum_vacuum_cost_limit", + "autovacuum_vacuum_scale_factor", + "autovacuum_vacuum_threshold", + "bgwriter_delay", + "bgwriter_flush_after", + "bgwriter_lru_maxpages", + "bgwriter_lru_multiplier", + "deadlock_timeout", + "default_toast_compression", + "idle_in_transaction_session_timeout", + "jit", + "max_files_per_process", + "max_locks_per_transaction", + "max_logical_replication_workers", + "max_parallel_workers", + "max_parallel_workers_per_gather", + "max_pred_locks_per_transaction", + "max_replication_slots", + "max_slot_wal_keep_size", + "max_stack_depth", + "max_standby_archive_delay", + "max_standby_streaming_delay", + "max_wal_senders", + "max_worker_processes", + "password_encryption", + "pg_partman_bgw.interval", + "pg_partman_bgw.role", + "pg_stat_monitor.pgsm_enable_query_plan", + "pg_stat_monitor.pgsm_max_buckets", + "pg_stat_statements.track", + "temp_file_limit", + "timezone", + "track_activity_query_size", + "track_commit_timestamp", + "track_functions", + "track_io_timing", + "wal_sender_timeout", + "wal_writer_delay", + ] + + # Assert all valid fields are present + for key in expected_keys: + assert key in pg, f"{key} not found in postgresql config" + + assert pg["autovacuum_analyze_scale_factor"]["type"] == "number" + assert pg["autovacuum_analyze_threshold"]["type"] == "integer" + assert pg["autovacuum_max_workers"]["requires_restart"] is True + assert pg["default_toast_compression"]["enum"] == ["lz4", "pglz"] + assert pg["jit"]["type"] == "boolean" + assert "enum" in 
pg["password_encryption"] + assert "pattern" in pg["pg_partman_bgw.role"] + assert pg["pg_stat_monitor.pgsm_enable_query_plan"]["type"] == "boolean" + assert pg["pg_stat_monitor.pgsm_max_buckets"]["requires_restart"] is True + assert pg["pg_stat_statements.track"]["enum"] == ["all", "top", "none"] + assert pg["track_commit_timestamp"]["enum"] == ["off", "on"] + assert pg["track_functions"]["enum"] == ["all", "pl", "none"] + assert pg["track_io_timing"]["enum"] == ["off", "on"] + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_postgres_with_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = postgres_db_with_engine_config + actual_config = db.engine_config.pg + expected_config = make_full_postgres_engine_config().pg.__dict__ + + for key, expected_value in expected_config.items(): + actual_value = getattr(actual_config, key, None) + assert ( + actual_value is None or actual_value == expected_value + ), f"{key} mismatch: expected {expected_value}, got {actual_value}" + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_update_postgres_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = postgres_db_with_engine_config + + db.updates.day_of_week = 2 + db.engine_config = PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_threshold=70, deadlock_timeout=2000 + ), + shared_buffers_percentage=25.0, + ) + + db.save() + + wait_for_condition( + 30, + 300, + get_postgres_db_status, + test_linode_client, + db.id, + "active", + ) + + database = test_linode_client.load(PostgreSQLDatabase, db.id) + + assert database.updates.day_of_week == 2 + assert 
database.engine_config.pg.autovacuum_analyze_threshold == 70 + assert database.engine_config.pg.deadlock_timeout == 2000 + assert database.engine_config.shared_buffers_percentage == 25.0 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_create_pg13_with_lz4_error(test_linode_client): + client = test_linode_client + label = get_test_label() + "-postgresqldb" + region = "us-ord" + engine_id = get_db_engine_id(client, "postgresql/13") + dbtype = "g6-standard-1" + + try: + client.database.postgresql_create( + label=label, + region=region, + engine=engine_id, + ltype=dbtype, + cluster_size=None, + engine_config=PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + default_toast_compression="lz4" + ), + work_mem=4, + ), + ) + except ApiError as e: + assert "An error occurred" in str(e.json) + assert e.status == 500 + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_list_postgres_engine_config( + test_linode_client, postgres_db_with_engine_config +): + dbs = test_linode_client.database.postgresql_instances() + + db_ids = [db.id for db in dbs] + + assert postgres_db_with_engine_config.id in db_ids + + +@pytest.mark.skipif( + os.getenv("RUN_DB_TESTS", "").strip().lower() not in {"yes", "true"}, + reason="RUN_DB_TESTS environment variable must be set to 'yes' or 'true' (case insensitive)", +) +def test_get_postgres_engine_config( + test_linode_client, postgres_db_with_engine_config +): + db = test_linode_client.load( + PostgreSQLDatabase, postgres_db_with_engine_config.id + ) + + assert isinstance(db, PostgreSQLDatabase) diff --git a/test/unit/groups/database_test.py b/test/unit/groups/database_test.py index 09d842b7..d1939aec 100644 --- 
a/test/unit/groups/database_test.py +++ b/test/unit/groups/database_test.py @@ -132,6 +132,1194 @@ def test_create(self): self.assertEqual(m.call_data["type"], "g6-standard-1") self.assertEqual(m.call_data["cluster_size"], 3) + def test_mysql_config_options(self): + """ + Test that MySQL configuration options can be retrieved + """ + + config = self.client.database.mysql_config_options() + + self.assertEqual( + "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + config["mysql"]["connect_timeout"]["description"], + ) + self.assertEqual(10, config["mysql"]["connect_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["connect_timeout"]["maximum"]) + self.assertEqual(2, config["mysql"]["connect_timeout"]["minimum"]) + self.assertFalse(config["mysql"]["connect_timeout"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["connect_timeout"]["type"]) + + self.assertEqual( + "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + config["mysql"]["default_time_zone"]["description"], + ) + self.assertEqual( + "+03:00", config["mysql"]["default_time_zone"]["example"] + ) + self.assertEqual(100, config["mysql"]["default_time_zone"]["maxLength"]) + self.assertEqual(2, config["mysql"]["default_time_zone"]["minLength"]) + self.assertEqual( + "^([-+][\\d:]*|[\\w/]*)$", + config["mysql"]["default_time_zone"]["pattern"], + ) + self.assertFalse( + config["mysql"]["default_time_zone"]["requires_restart"] + ) + self.assertEqual("string", config["mysql"]["default_time_zone"]["type"]) + + self.assertEqual( + "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + config["mysql"]["group_concat_max_len"]["description"], + ) + self.assertEqual( + 1024, config["mysql"]["group_concat_max_len"]["example"] + ) + self.assertEqual( + 18446744073709551600, + 
config["mysql"]["group_concat_max_len"]["maximum"], + ) + self.assertEqual(4, config["mysql"]["group_concat_max_len"]["minimum"]) + self.assertFalse( + config["mysql"]["group_concat_max_len"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["group_concat_max_len"]["type"] + ) + + self.assertEqual( + "The time, in seconds, before cached statistics expire", + config["mysql"]["information_schema_stats_expiry"]["description"], + ) + self.assertEqual( + 86400, config["mysql"]["information_schema_stats_expiry"]["example"] + ) + self.assertEqual( + 31536000, + config["mysql"]["information_schema_stats_expiry"]["maximum"], + ) + self.assertEqual( + 900, config["mysql"]["information_schema_stats_expiry"]["minimum"] + ) + self.assertFalse( + config["mysql"]["information_schema_stats_expiry"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["mysql"]["information_schema_stats_expiry"]["type"], + ) + + self.assertEqual( + "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. 
Default is 25", + config["mysql"]["innodb_change_buffer_max_size"]["description"], + ) + self.assertEqual( + 30, config["mysql"]["innodb_change_buffer_max_size"]["example"] + ) + self.assertEqual( + 50, config["mysql"]["innodb_change_buffer_max_size"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_change_buffer_max_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_change_buffer_max_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_change_buffer_max_size"]["type"] + ) + + self.assertEqual( + "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + config["mysql"]["innodb_flush_neighbors"]["description"], + ) + self.assertEqual( + 0, config["mysql"]["innodb_flush_neighbors"]["example"] + ) + self.assertEqual( + 2, config["mysql"]["innodb_flush_neighbors"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_flush_neighbors"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_flush_neighbors"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_flush_neighbors"]["type"] + ) + + self.assertEqual( + "Minimum length of words that are stored in an InnoDB FULLTEXT index. 
Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_ft_min_token_size"]["description"], + ) + self.assertEqual( + 3, config["mysql"]["innodb_ft_min_token_size"]["example"] + ) + self.assertEqual( + 16, config["mysql"]["innodb_ft_min_token_size"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_ft_min_token_size"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_ft_min_token_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_ft_min_token_size"]["type"] + ) + + self.assertEqual( + "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + config["mysql"]["innodb_ft_server_stopword_table"]["description"], + ) + self.assertEqual( + "db_name/table_name", + config["mysql"]["innodb_ft_server_stopword_table"]["example"], + ) + self.assertEqual( + 1024, + config["mysql"]["innodb_ft_server_stopword_table"]["maxLength"], + ) + self.assertEqual( + "^.+/.+$", + config["mysql"]["innodb_ft_server_stopword_table"]["pattern"], + ) + self.assertFalse( + config["mysql"]["innodb_ft_server_stopword_table"][ + "requires_restart" + ] + ) + self.assertEqual( + ["null", "string"], + config["mysql"]["innodb_ft_server_stopword_table"]["type"], + ) + + self.assertEqual( + "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. 
Default is 120.", + config["mysql"]["innodb_lock_wait_timeout"]["description"], + ) + self.assertEqual( + 50, config["mysql"]["innodb_lock_wait_timeout"]["example"] + ) + self.assertEqual( + 3600, config["mysql"]["innodb_lock_wait_timeout"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_lock_wait_timeout"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_lock_wait_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_lock_wait_timeout"]["type"] + ) + + self.assertEqual( + "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + config["mysql"]["innodb_log_buffer_size"]["description"], + ) + self.assertEqual( + 16777216, config["mysql"]["innodb_log_buffer_size"]["example"] + ) + self.assertEqual( + 4294967295, config["mysql"]["innodb_log_buffer_size"]["maximum"] + ) + self.assertEqual( + 1048576, config["mysql"]["innodb_log_buffer_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_log_buffer_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_log_buffer_size"]["type"] + ) + + self.assertEqual( + "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + config["mysql"]["innodb_online_alter_log_max_size"]["description"], + ) + self.assertEqual( + 134217728, + config["mysql"]["innodb_online_alter_log_max_size"]["example"], + ) + self.assertEqual( + 1099511627776, + config["mysql"]["innodb_online_alter_log_max_size"]["maximum"], + ) + self.assertEqual( + 65536, + config["mysql"]["innodb_online_alter_log_max_size"]["minimum"], + ) + self.assertFalse( + config["mysql"]["innodb_online_alter_log_max_size"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["mysql"]["innodb_online_alter_log_max_size"]["type"], + ) + + self.assertEqual( + "The number of I/O threads for read operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_read_io_threads"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_read_io_threads"]["example"] + ) + self.assertEqual( + 64, config["mysql"]["innodb_read_io_threads"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_read_io_threads"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_read_io_threads"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_read_io_threads"]["type"] + ) + + self.assertEqual( + "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_rollback_on_timeout"]["description"], + ) + self.assertTrue( + config["mysql"]["innodb_rollback_on_timeout"]["example"] + ) + self.assertTrue( + config["mysql"]["innodb_rollback_on_timeout"]["requires_restart"] + ) + self.assertEqual( + "boolean", config["mysql"]["innodb_rollback_on_timeout"]["type"] + ) + + self.assertEqual( + "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + config["mysql"]["innodb_thread_concurrency"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_thread_concurrency"]["example"] + ) + self.assertEqual( + 1000, config["mysql"]["innodb_thread_concurrency"]["maximum"] + ) + self.assertEqual( + 0, config["mysql"]["innodb_thread_concurrency"]["minimum"] + ) + self.assertFalse( + config["mysql"]["innodb_thread_concurrency"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_thread_concurrency"]["type"] + ) + + self.assertEqual( + "The number of I/O threads for write operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["innodb_write_io_threads"]["description"], + ) + self.assertEqual( + 10, config["mysql"]["innodb_write_io_threads"]["example"] + ) + self.assertEqual( + 64, config["mysql"]["innodb_write_io_threads"]["maximum"] + ) + self.assertEqual( + 1, config["mysql"]["innodb_write_io_threads"]["minimum"] + ) + self.assertTrue( + config["mysql"]["innodb_write_io_threads"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["innodb_write_io_threads"]["type"] + ) + + self.assertEqual( + "The number of seconds the server waits for activity on an interactive connection before closing it.", + config["mysql"]["interactive_timeout"]["description"], + ) + self.assertEqual( + 3600, config["mysql"]["interactive_timeout"]["example"] + ) + self.assertEqual( + 604800, config["mysql"]["interactive_timeout"]["maximum"] + ) + self.assertEqual(30, config["mysql"]["interactive_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["interactive_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["interactive_timeout"]["type"] + ) + + self.assertEqual( + "The storage engine for in-memory internal temporary tables.", + config["mysql"]["internal_tmp_mem_storage_engine"]["description"], + ) + self.assertEqual( + "TempTable", + config["mysql"]["internal_tmp_mem_storage_engine"]["example"], + ) + self.assertEqual( + ["TempTable", "MEMORY"], + config["mysql"]["internal_tmp_mem_storage_engine"]["enum"], + ) + self.assertFalse( + config["mysql"]["internal_tmp_mem_storage_engine"][ + "requires_restart" + ] + ) + self.assertEqual( + "string", config["mysql"]["internal_tmp_mem_storage_engine"]["type"] + ) + + self.assertEqual( + "Size of the largest message in bytes that can be received by the server. 
Default is 67108864 (64M)", + config["mysql"]["max_allowed_packet"]["description"], + ) + self.assertEqual( + 67108864, config["mysql"]["max_allowed_packet"]["example"] + ) + self.assertEqual( + 1073741824, config["mysql"]["max_allowed_packet"]["maximum"] + ) + self.assertEqual( + 102400, config["mysql"]["max_allowed_packet"]["minimum"] + ) + self.assertFalse( + config["mysql"]["max_allowed_packet"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["max_allowed_packet"]["type"] + ) + + self.assertEqual( + "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M)", + config["mysql"]["max_heap_table_size"]["description"], + ) + self.assertEqual( + 16777216, config["mysql"]["max_heap_table_size"]["example"] + ) + self.assertEqual( + 1073741824, config["mysql"]["max_heap_table_size"]["maximum"] + ) + self.assertEqual( + 1048576, config["mysql"]["max_heap_table_size"]["minimum"] + ) + self.assertFalse( + config["mysql"]["max_heap_table_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["max_heap_table_size"]["type"] + ) + + self.assertEqual( + "Start sizes of connection buffer and result buffer. Default is 16384 (16K). 
Changing this parameter will lead to a restart of the MySQL service.", + config["mysql"]["net_buffer_length"]["description"], + ) + self.assertEqual(16384, config["mysql"]["net_buffer_length"]["example"]) + self.assertEqual( + 1048576, config["mysql"]["net_buffer_length"]["maximum"] + ) + self.assertEqual(1024, config["mysql"]["net_buffer_length"]["minimum"]) + self.assertTrue( + config["mysql"]["net_buffer_length"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["net_buffer_length"]["type"] + ) + + self.assertEqual( + "The number of seconds to wait for more data from a connection before aborting the read.", + config["mysql"]["net_read_timeout"]["description"], + ) + self.assertEqual(30, config["mysql"]["net_read_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["net_read_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["net_read_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["net_read_timeout"]["requires_restart"] + ) + self.assertEqual("integer", config["mysql"]["net_read_timeout"]["type"]) + + self.assertEqual( + "The number of seconds to wait for a block to be written to a connection before aborting the write.", + config["mysql"]["net_write_timeout"]["description"], + ) + self.assertEqual(30, config["mysql"]["net_write_timeout"]["example"]) + self.assertEqual(3600, config["mysql"]["net_write_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["net_write_timeout"]["minimum"]) + self.assertFalse( + config["mysql"]["net_write_timeout"]["requires_restart"] + ) + self.assertEqual( + "integer", config["mysql"]["net_write_timeout"]["type"] + ) + + self.assertEqual( + "Sort buffer size in bytes for ORDER BY optimization. 
Default is 262144 (256K)", + config["mysql"]["sort_buffer_size"]["description"], + ) + self.assertEqual(262144, config["mysql"]["sort_buffer_size"]["example"]) + self.assertEqual( + 1073741824, config["mysql"]["sort_buffer_size"]["maximum"] + ) + self.assertEqual(32768, config["mysql"]["sort_buffer_size"]["minimum"]) + self.assertFalse( + config["mysql"]["sort_buffer_size"]["requires_restart"] + ) + self.assertEqual("integer", config["mysql"]["sort_buffer_size"]["type"]) + + self.assertEqual( + "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + config["mysql"]["sql_mode"]["description"], + ) + self.assertEqual( + "ANSI,TRADITIONAL", config["mysql"]["sql_mode"]["example"] + ) + self.assertEqual(1024, config["mysql"]["sql_mode"]["maxLength"]) + self.assertEqual( + "^[A-Z_]*(,[A-Z_]+)*$", config["mysql"]["sql_mode"]["pattern"] + ) + self.assertFalse(config["mysql"]["sql_mode"]["requires_restart"]) + self.assertEqual("string", config["mysql"]["sql_mode"]["type"]) + + self.assertEqual( + "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + config["mysql"]["sql_require_primary_key"]["description"], + ) + self.assertTrue(config["mysql"]["sql_require_primary_key"]["example"]) + self.assertFalse( + config["mysql"]["sql_require_primary_key"]["requires_restart"] + ) + self.assertEqual( + "boolean", config["mysql"]["sql_require_primary_key"]["type"] + ) + + self.assertEqual( + "Limits the size of internal in-memory tables. Also set max_heap_table_size. 
Default is 16777216 (16M)", + config["mysql"]["tmp_table_size"]["description"], + ) + self.assertEqual(16777216, config["mysql"]["tmp_table_size"]["example"]) + self.assertEqual( + 1073741824, config["mysql"]["tmp_table_size"]["maximum"] + ) + self.assertEqual(1048576, config["mysql"]["tmp_table_size"]["minimum"]) + self.assertFalse(config["mysql"]["tmp_table_size"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["tmp_table_size"]["type"]) + + self.assertEqual( + "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + config["mysql"]["wait_timeout"]["description"], + ) + self.assertEqual(28800, config["mysql"]["wait_timeout"]["example"]) + self.assertEqual(2147483, config["mysql"]["wait_timeout"]["maximum"]) + self.assertEqual(1, config["mysql"]["wait_timeout"]["minimum"]) + self.assertFalse(config["mysql"]["wait_timeout"]["requires_restart"]) + self.assertEqual("integer", config["mysql"]["wait_timeout"]["type"]) + + self.assertEqual( + "The minimum amount of time in seconds to keep binlog entries before deletion. 
This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + config["binlog_retention_period"]["description"], + ) + self.assertEqual(600, config["binlog_retention_period"]["example"]) + self.assertEqual(86400, config["binlog_retention_period"]["maximum"]) + self.assertEqual(600, config["binlog_retention_period"]["minimum"]) + self.assertFalse(config["binlog_retention_period"]["requires_restart"]) + self.assertEqual("integer", config["binlog_retention_period"]["type"]) + + def test_postgresql_config_options(self): + """ + Test that PostgreSQL configuration options can be retrieved + """ + + config = self.client.database.postgresql_config_options() + + self.assertEqual( + "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when " + + "deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + config["pg"]["autovacuum_analyze_scale_factor"]["description"], + ) + self.assertEqual( + 1.0, config["pg"]["autovacuum_analyze_scale_factor"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["autovacuum_analyze_scale_factor"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_analyze_scale_factor"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["autovacuum_analyze_scale_factor"]["type"] + ) + + self.assertEqual( + "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. 
The default is 50 tuples.", + config["pg"]["autovacuum_analyze_threshold"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["autovacuum_analyze_threshold"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["autovacuum_analyze_threshold"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_analyze_threshold"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_analyze_threshold"]["type"] + ) + + self.assertEqual( + "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + config["pg"]["autovacuum_max_workers"]["description"], + ) + self.assertEqual(20, config["pg"]["autovacuum_max_workers"]["maximum"]) + self.assertEqual(1, config["pg"]["autovacuum_max_workers"]["minimum"]) + self.assertFalse( + config["pg"]["autovacuum_max_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_max_workers"]["type"] + ) + + self.assertEqual( + "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + config["pg"]["autovacuum_naptime"]["description"], + ) + self.assertEqual(86400, config["pg"]["autovacuum_naptime"]["maximum"]) + self.assertEqual(1, config["pg"]["autovacuum_naptime"]["minimum"]) + self.assertFalse(config["pg"]["autovacuum_naptime"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["autovacuum_naptime"]["type"]) + + self.assertEqual( + "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. 
The default value is 20 milliseconds", + config["pg"]["autovacuum_vacuum_cost_delay"]["description"], + ) + self.assertEqual( + 100, config["pg"]["autovacuum_vacuum_cost_delay"]["maximum"] + ) + self.assertEqual( + -1, config["pg"]["autovacuum_vacuum_cost_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_cost_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_cost_delay"]["type"] + ) + + self.assertEqual( + "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + config["pg"]["autovacuum_vacuum_cost_limit"]["description"], + ) + self.assertEqual( + 10000, config["pg"]["autovacuum_vacuum_cost_limit"]["maximum"] + ) + self.assertEqual( + -1, config["pg"]["autovacuum_vacuum_cost_limit"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_cost_limit"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_cost_limit"]["type"] + ) + + self.assertEqual( + "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + config["pg"]["autovacuum_vacuum_scale_factor"]["description"], + ) + self.assertEqual( + 1.0, config["pg"]["autovacuum_vacuum_scale_factor"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["autovacuum_vacuum_scale_factor"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_scale_factor"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["autovacuum_vacuum_scale_factor"]["type"] + ) + + self.assertEqual( + "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. 
The default is 50 tuples", + config["pg"]["autovacuum_vacuum_threshold"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["autovacuum_vacuum_threshold"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["autovacuum_vacuum_threshold"]["minimum"] + ) + self.assertFalse( + config["pg"]["autovacuum_vacuum_threshold"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["autovacuum_vacuum_threshold"]["type"] + ) + + self.assertEqual( + "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + config["pg"]["bgwriter_delay"]["description"], + ) + self.assertEqual(200, config["pg"]["bgwriter_delay"]["example"]) + self.assertEqual(10000, config["pg"]["bgwriter_delay"]["maximum"]) + self.assertEqual(10, config["pg"]["bgwriter_delay"]["minimum"]) + self.assertFalse(config["pg"]["bgwriter_delay"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["bgwriter_delay"]["type"]) + + self.assertEqual( + "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + config["pg"]["bgwriter_flush_after"]["description"], + ) + self.assertEqual(512, config["pg"]["bgwriter_flush_after"]["example"]) + self.assertEqual(2048, config["pg"]["bgwriter_flush_after"]["maximum"]) + self.assertEqual(0, config["pg"]["bgwriter_flush_after"]["minimum"]) + self.assertFalse( + config["pg"]["bgwriter_flush_after"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["bgwriter_flush_after"]["type"] + ) + + self.assertEqual( + "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. 
Default is 100.", + config["pg"]["bgwriter_lru_maxpages"]["description"], + ) + self.assertEqual(100, config["pg"]["bgwriter_lru_maxpages"]["example"]) + self.assertEqual( + 1073741823, config["pg"]["bgwriter_lru_maxpages"]["maximum"] + ) + self.assertEqual(0, config["pg"]["bgwriter_lru_maxpages"]["minimum"]) + self.assertFalse( + config["pg"]["bgwriter_lru_maxpages"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["bgwriter_lru_maxpages"]["type"] + ) + + self.assertEqual( + "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + config["pg"]["bgwriter_lru_multiplier"]["description"], + ) + self.assertEqual( + 2.0, config["pg"]["bgwriter_lru_multiplier"]["example"] + ) + self.assertEqual( + 10.0, config["pg"]["bgwriter_lru_multiplier"]["maximum"] + ) + self.assertEqual( + 0.0, config["pg"]["bgwriter_lru_multiplier"]["minimum"] + ) + self.assertFalse( + config["pg"]["bgwriter_lru_multiplier"]["requires_restart"] + ) + self.assertEqual( + "number", config["pg"]["bgwriter_lru_multiplier"]["type"] + ) + + self.assertEqual( + "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + config["pg"]["deadlock_timeout"]["description"], + ) + self.assertEqual(1000, config["pg"]["deadlock_timeout"]["example"]) + self.assertEqual(1800000, config["pg"]["deadlock_timeout"]["maximum"]) + self.assertEqual(500, config["pg"]["deadlock_timeout"]["minimum"]) + self.assertFalse(config["pg"]["deadlock_timeout"]["requires_restart"]) + self.assertEqual("integer", 
config["pg"]["deadlock_timeout"]["type"]) + + self.assertEqual( + "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + config["pg"]["default_toast_compression"]["description"], + ) + self.assertEqual( + ["lz4", "pglz"], config["pg"]["default_toast_compression"]["enum"] + ) + self.assertEqual( + "lz4", config["pg"]["default_toast_compression"]["example"] + ) + self.assertFalse( + config["pg"]["default_toast_compression"]["requires_restart"] + ) + self.assertEqual( + "string", config["pg"]["default_toast_compression"]["type"] + ) + + self.assertEqual( + "Time out sessions with open transactions after this number of milliseconds", + config["pg"]["idle_in_transaction_session_timeout"]["description"], + ) + self.assertEqual( + 604800000, + config["pg"]["idle_in_transaction_session_timeout"]["maximum"], + ) + self.assertEqual( + 0, config["pg"]["idle_in_transaction_session_timeout"]["minimum"] + ) + self.assertFalse( + config["pg"]["idle_in_transaction_session_timeout"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["pg"]["idle_in_transaction_session_timeout"]["type"], + ) + + self.assertEqual( + "Controls system-wide use of Just-in-Time Compilation (JIT).", + config["pg"]["jit"]["description"], + ) + self.assertTrue(config["pg"]["jit"]["example"]) + self.assertFalse(config["pg"]["jit"]["requires_restart"]) + self.assertEqual("boolean", config["pg"]["jit"]["type"]) + + self.assertEqual( + "PostgreSQL maximum number of files that can be open per process", + config["pg"]["max_files_per_process"]["description"], + ) + self.assertEqual(4096, config["pg"]["max_files_per_process"]["maximum"]) + self.assertEqual(1000, config["pg"]["max_files_per_process"]["minimum"]) + self.assertFalse( + config["pg"]["max_files_per_process"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_files_per_process"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum locks per transaction", 
+ config["pg"]["max_locks_per_transaction"]["description"], + ) + self.assertEqual( + 6400, config["pg"]["max_locks_per_transaction"]["maximum"] + ) + self.assertEqual( + 64, config["pg"]["max_locks_per_transaction"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_locks_per_transaction"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_locks_per_transaction"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + config["pg"]["max_logical_replication_workers"]["description"], + ) + self.assertEqual( + 64, config["pg"]["max_logical_replication_workers"]["maximum"] + ) + self.assertEqual( + 4, config["pg"]["max_logical_replication_workers"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_logical_replication_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_logical_replication_workers"]["type"] + ) + + self.assertEqual( + "Sets the maximum number of workers that the system can support for parallel queries", + config["pg"]["max_parallel_workers"]["description"], + ) + self.assertEqual(96, config["pg"]["max_parallel_workers"]["maximum"]) + self.assertEqual(0, config["pg"]["max_parallel_workers"]["minimum"]) + self.assertFalse( + config["pg"]["max_parallel_workers"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_parallel_workers"]["type"] + ) + + self.assertEqual( + "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + config["pg"]["max_parallel_workers_per_gather"]["description"], + ) + self.assertEqual( + 96, config["pg"]["max_parallel_workers_per_gather"]["maximum"] + ) + self.assertEqual( + 0, config["pg"]["max_parallel_workers_per_gather"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_parallel_workers_per_gather"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_parallel_workers_per_gather"]["type"] + ) + + 
self.assertEqual( + "PostgreSQL maximum predicate locks per transaction", + config["pg"]["max_pred_locks_per_transaction"]["description"], + ) + self.assertEqual( + 5120, config["pg"]["max_pred_locks_per_transaction"]["maximum"] + ) + self.assertEqual( + 64, config["pg"]["max_pred_locks_per_transaction"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_pred_locks_per_transaction"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_pred_locks_per_transaction"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum replication slots", + config["pg"]["max_replication_slots"]["description"], + ) + self.assertEqual(64, config["pg"]["max_replication_slots"]["maximum"]) + self.assertEqual(8, config["pg"]["max_replication_slots"]["minimum"]) + self.assertFalse( + config["pg"]["max_replication_slots"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_replication_slots"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + config["pg"]["max_slot_wal_keep_size"]["description"], + ) + self.assertEqual( + 2147483647, config["pg"]["max_slot_wal_keep_size"]["maximum"] + ) + self.assertEqual(-1, config["pg"]["max_slot_wal_keep_size"]["minimum"]) + self.assertFalse( + config["pg"]["max_slot_wal_keep_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_slot_wal_keep_size"]["type"] + ) + + self.assertEqual( + "Maximum depth of the stack in bytes", + config["pg"]["max_stack_depth"]["description"], + ) + self.assertEqual(6291456, config["pg"]["max_stack_depth"]["maximum"]) + self.assertEqual(2097152, config["pg"]["max_stack_depth"]["minimum"]) + self.assertFalse(config["pg"]["max_stack_depth"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["max_stack_depth"]["type"]) + + self.assertEqual( + "Max standby archive delay in milliseconds", + config["pg"]["max_standby_archive_delay"]["description"], + ) + self.assertEqual( + 43200000, config["pg"]["max_standby_archive_delay"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["max_standby_archive_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_standby_archive_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_standby_archive_delay"]["type"] + ) + + self.assertEqual( + "Max standby streaming delay in milliseconds", + config["pg"]["max_standby_streaming_delay"]["description"], + ) + self.assertEqual( + 43200000, config["pg"]["max_standby_streaming_delay"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["max_standby_streaming_delay"]["minimum"] + ) + self.assertFalse( + config["pg"]["max_standby_streaming_delay"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_standby_streaming_delay"]["type"] + ) + + self.assertEqual( + "PostgreSQL maximum WAL senders", + config["pg"]["max_wal_senders"]["description"], + ) + self.assertEqual(64, 
config["pg"]["max_wal_senders"]["maximum"]) + self.assertEqual(20, config["pg"]["max_wal_senders"]["minimum"]) + self.assertFalse(config["pg"]["max_wal_senders"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["max_wal_senders"]["type"]) + + self.assertEqual( + "Sets the maximum number of background processes that the system can support", + config["pg"]["max_worker_processes"]["description"], + ) + self.assertEqual(96, config["pg"]["max_worker_processes"]["maximum"]) + self.assertEqual(8, config["pg"]["max_worker_processes"]["minimum"]) + self.assertFalse( + config["pg"]["max_worker_processes"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["max_worker_processes"]["type"] + ) + + self.assertEqual( + "Chooses the algorithm for encrypting passwords.", + config["pg"]["password_encryption"]["description"], + ) + self.assertEqual( + ["md5", "scram-sha-256"], + config["pg"]["password_encryption"]["enum"], + ) + self.assertEqual( + "scram-sha-256", config["pg"]["password_encryption"]["example"] + ) + self.assertFalse( + config["pg"]["password_encryption"]["requires_restart"] + ) + self.assertEqual( + ["string", "null"], config["pg"]["password_encryption"]["type"] + ) + + self.assertEqual( + "Sets the time interval to run pg_partman's scheduled tasks", + config["pg"]["pg_partman_bgw.interval"]["description"], + ) + self.assertEqual( + 3600, config["pg"]["pg_partman_bgw.interval"]["example"] + ) + self.assertEqual( + 604800, config["pg"]["pg_partman_bgw.interval"]["maximum"] + ) + self.assertEqual( + 3600, config["pg"]["pg_partman_bgw.interval"]["minimum"] + ) + self.assertFalse( + config["pg"]["pg_partman_bgw.interval"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["pg_partman_bgw.interval"]["type"] + ) + + self.assertEqual( + "Controls which role to use for pg_partman's scheduled background tasks.", + config["pg"]["pg_partman_bgw.role"]["description"], + ) + self.assertEqual( + "myrolename", 
config["pg"]["pg_partman_bgw.role"]["example"] + ) + self.assertEqual(64, config["pg"]["pg_partman_bgw.role"]["maxLength"]) + self.assertEqual( + "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + config["pg"]["pg_partman_bgw.role"]["pattern"], + ) + self.assertFalse( + config["pg"]["pg_partman_bgw.role"]["requires_restart"] + ) + self.assertEqual("string", config["pg"]["pg_partman_bgw.role"]["type"]) + + self.assertEqual( + "Enables or disables query plan monitoring", + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"][ + "description" + ], + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"]["example"] + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"][ + "requires_restart" + ] + ) + self.assertEqual( + "boolean", + config["pg"]["pg_stat_monitor.pgsm_enable_query_plan"]["type"], + ) + + self.assertEqual( + "Sets the maximum number of buckets", + config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["description"], + ) + self.assertEqual( + 10, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["example"] + ) + self.assertEqual( + 10, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["maximum"] + ) + self.assertEqual( + 1, config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["minimum"] + ) + self.assertFalse( + config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["pg_stat_monitor.pgsm_max_buckets"]["type"] + ) + + self.assertEqual( + "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. 
The default value is top.", + config["pg"]["pg_stat_statements.track"]["description"], + ) + self.assertEqual( + ["all", "top", "none"], + config["pg"]["pg_stat_statements.track"]["enum"], + ) + self.assertFalse( + config["pg"]["pg_stat_statements.track"]["requires_restart"] + ) + self.assertEqual( + ["string"], config["pg"]["pg_stat_statements.track"]["type"] + ) + + self.assertEqual( + "PostgreSQL temporary file limit in KiB, -1 for unlimited", + config["pg"]["temp_file_limit"]["description"], + ) + self.assertEqual(5000000, config["pg"]["temp_file_limit"]["example"]) + self.assertEqual(2147483647, config["pg"]["temp_file_limit"]["maximum"]) + self.assertEqual(-1, config["pg"]["temp_file_limit"]["minimum"]) + self.assertFalse(config["pg"]["temp_file_limit"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["temp_file_limit"]["type"]) + + self.assertEqual( + "PostgreSQL service timezone", + config["pg"]["timezone"]["description"], + ) + self.assertEqual("Europe/Helsinki", config["pg"]["timezone"]["example"]) + self.assertEqual(64, config["pg"]["timezone"]["maxLength"]) + self.assertEqual("^[\\w/]*$", config["pg"]["timezone"]["pattern"]) + self.assertFalse(config["pg"]["timezone"]["requires_restart"]) + self.assertEqual("string", config["pg"]["timezone"]["type"]) + + self.assertEqual( + "Specifies the number of bytes reserved to track the currently executing command for each active session.", + config["pg"]["track_activity_query_size"]["description"], + ) + self.assertEqual( + 1024, config["pg"]["track_activity_query_size"]["example"] + ) + self.assertEqual( + 10240, config["pg"]["track_activity_query_size"]["maximum"] + ) + self.assertEqual( + 1024, config["pg"]["track_activity_query_size"]["minimum"] + ) + self.assertFalse( + config["pg"]["track_activity_query_size"]["requires_restart"] + ) + self.assertEqual( + "integer", config["pg"]["track_activity_query_size"]["type"] + ) + + self.assertEqual( + "Record commit time of transactions.", + 
config["pg"]["track_commit_timestamp"]["description"], + ) + self.assertEqual( + "off", config["pg"]["track_commit_timestamp"]["example"] + ) + self.assertEqual( + ["off", "on"], config["pg"]["track_commit_timestamp"]["enum"] + ) + self.assertFalse( + config["pg"]["track_commit_timestamp"]["requires_restart"] + ) + self.assertEqual( + "string", config["pg"]["track_commit_timestamp"]["type"] + ) + + self.assertEqual( + "Enables tracking of function call counts and time used.", + config["pg"]["track_functions"]["description"], + ) + self.assertEqual( + ["all", "pl", "none"], config["pg"]["track_functions"]["enum"] + ) + self.assertFalse(config["pg"]["track_functions"]["requires_restart"]) + self.assertEqual("string", config["pg"]["track_functions"]["type"]) + + self.assertEqual( + "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + config["pg"]["track_io_timing"]["description"], + ) + self.assertEqual("off", config["pg"]["track_io_timing"]["example"]) + self.assertEqual(["off", "on"], config["pg"]["track_io_timing"]["enum"]) + self.assertFalse(config["pg"]["track_io_timing"]["requires_restart"]) + self.assertEqual("string", config["pg"]["track_io_timing"]["type"]) + + self.assertEqual( + "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + config["pg"]["wal_sender_timeout"]["description"], + ) + self.assertEqual(60000, config["pg"]["wal_sender_timeout"]["example"]) + self.assertFalse(config["pg"]["wal_sender_timeout"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["wal_sender_timeout"]["type"]) + + self.assertEqual( + "WAL flush interval in milliseconds. 
Note that setting this value to lower than the default 200ms may negatively impact performance", + config["pg"]["wal_writer_delay"]["description"], + ) + self.assertEqual(50, config["pg"]["wal_writer_delay"]["example"]) + self.assertEqual(200, config["pg"]["wal_writer_delay"]["maximum"]) + self.assertEqual(10, config["pg"]["wal_writer_delay"]["minimum"]) + self.assertFalse(config["pg"]["wal_writer_delay"]["requires_restart"]) + self.assertEqual("integer", config["pg"]["wal_writer_delay"]["type"]) + + self.assertEqual( + "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted. When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + config["pg_stat_monitor_enable"]["description"], + ) + self.assertTrue(config["pg_stat_monitor_enable"]["requires_restart"]) + self.assertEqual("boolean", config["pg_stat_monitor_enable"]["type"]) + + self.assertEqual( + "Number of seconds of master unavailability before triggering database failover to standby", + config["pglookout"]["max_failover_replication_time_lag"][ + "description" + ], + ) + self.assertEqual( + int(9223372036854775000), + config["pglookout"]["max_failover_replication_time_lag"]["maximum"], + ) + self.assertEqual( + int(10), + config["pglookout"]["max_failover_replication_time_lag"]["minimum"], + ) + self.assertFalse( + config["pglookout"]["max_failover_replication_time_lag"][ + "requires_restart" + ] + ) + self.assertEqual( + "integer", + config["pglookout"]["max_failover_replication_time_lag"]["type"], + ) + + self.assertEqual( + "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. 
This setting adjusts the shared_buffers configuration value.", + config["shared_buffers_percentage"]["description"], + ) + self.assertEqual(41.5, config["shared_buffers_percentage"]["example"]) + self.assertEqual(60.0, config["shared_buffers_percentage"]["maximum"]) + self.assertEqual(20.0, config["shared_buffers_percentage"]["minimum"]) + self.assertFalse( + config["shared_buffers_percentage"]["requires_restart"] + ) + self.assertEqual("number", config["shared_buffers_percentage"]["type"]) + + self.assertEqual( + "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).", + config["work_mem"]["description"], + ) + self.assertEqual(4, config["work_mem"]["example"]) + self.assertEqual(1024, config["work_mem"]["maximum"]) + self.assertEqual(1, config["work_mem"]["minimum"]) + self.assertFalse(config["work_mem"]["requires_restart"]) + self.assertEqual("integer", config["work_mem"]["type"]) + class PostgreSQLDatabaseTest(ClientBaseCase): """ diff --git a/test/unit/objects/database_test.py b/test/unit/objects/database_test.py index 51c7de4c..8605e43c 100644 --- a/test/unit/objects/database_test.py +++ b/test/unit/objects/database_test.py @@ -1,7 +1,13 @@ import logging from test.unit.base import ClientBaseCase -from linode_api4 import PostgreSQLDatabase +from linode_api4 import ( + MySQLDatabaseConfigMySQLOptions, + MySQLDatabaseConfigOptions, + PostgreSQLDatabase, + PostgreSQLDatabaseConfigOptions, + PostgreSQLDatabaseConfigPGOptions, +) from linode_api4.objects import MySQLDatabase logger = logging.getLogger(__name__) @@ -103,6 +109,59 @@ def test_get_instances(self): self.assertEqual(dbs[0].region, "us-east") self.assertEqual(dbs[0].updates.duration, 3) self.assertEqual(dbs[0].version, "8.0.26") + self.assertEqual(dbs[0].engine_config.binlog_retention_period, 600) + self.assertEqual(dbs[0].engine_config.mysql.connect_timeout, 10) + 
self.assertEqual(dbs[0].engine_config.mysql.default_time_zone, "+03:00") + self.assertEqual(dbs[0].engine_config.mysql.group_concat_max_len, 1024) + self.assertEqual( + dbs[0].engine_config.mysql.information_schema_stats_expiry, 86400 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_change_buffer_max_size, 30 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_flush_neighbors, 0) + self.assertEqual(dbs[0].engine_config.mysql.innodb_ft_min_token_size, 3) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_ft_server_stopword_table, + "db_name/table_name", + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_lock_wait_timeout, 50 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_log_buffer_size, 16777216 + ) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_online_alter_log_max_size, + 134217728, + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_read_io_threads, 10) + self.assertTrue(dbs[0].engine_config.mysql.innodb_rollback_on_timeout) + self.assertEqual( + dbs[0].engine_config.mysql.innodb_thread_concurrency, 10 + ) + self.assertEqual(dbs[0].engine_config.mysql.innodb_write_io_threads, 10) + self.assertEqual(dbs[0].engine_config.mysql.interactive_timeout, 3600) + self.assertEqual( + dbs[0].engine_config.mysql.internal_tmp_mem_storage_engine, + "TempTable", + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_allowed_packet, 67108864 + ) + self.assertEqual( + dbs[0].engine_config.mysql.max_heap_table_size, 16777216 + ) + self.assertEqual(dbs[0].engine_config.mysql.net_buffer_length, 16384) + self.assertEqual(dbs[0].engine_config.mysql.net_read_timeout, 30) + self.assertEqual(dbs[0].engine_config.mysql.net_write_timeout, 30) + self.assertEqual(dbs[0].engine_config.mysql.sort_buffer_size, 262144) + self.assertEqual( + dbs[0].engine_config.mysql.sql_mode, "ANSI,TRADITIONAL" + ) + self.assertTrue(dbs[0].engine_config.mysql.sql_require_primary_key) + self.assertEqual(dbs[0].engine_config.mysql.tmp_table_size, 
16777216) + self.assertEqual(dbs[0].engine_config.mysql.wait_timeout, 28800) def test_create(self): """ @@ -121,6 +180,12 @@ def test_create(self): "mysql/8.0.26", "g6-standard-1", cluster_size=3, + engine_config=MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions( + connect_timeout=20 + ), + binlog_retention_period=200, + ), ) except Exception as e: logger.warning( @@ -134,6 +199,12 @@ def test_create(self): self.assertEqual(m.call_data["engine"], "mysql/8.0.26") self.assertEqual(m.call_data["type"], "g6-standard-1") self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) def test_update(self): """ @@ -148,6 +219,10 @@ def test_update(self): db.updates.day_of_week = 2 db.allow_list = new_allow_list db.label = "cool" + db.engine_config = MySQLDatabaseConfigOptions( + mysql=MySQLDatabaseConfigMySQLOptions(connect_timeout=20), + binlog_retention_period=200, + ) db.save() @@ -156,6 +231,12 @@ def test_update(self): self.assertEqual(m.call_data["label"], "cool") self.assertEqual(m.call_data["updates"]["day_of_week"], 2) self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["mysql"]["connect_timeout"], 20 + ) + self.assertEqual( + m.call_data["engine_config"]["binlog_retention_period"], 200 + ) def test_list_backups(self): """ @@ -321,6 +402,97 @@ def test_get_instances(self): self.assertEqual(dbs[0].updates.duration, 3) self.assertEqual(dbs[0].version, "13.2") + # NOTE(review): leftover debug print of engine_config.pg removed + + self.assertTrue(dbs[0].engine_config.pg_stat_monitor_enable) + self.assertEqual( + dbs[0].engine_config.pglookout.max_failover_replication_time_lag, + 1000, + ) + self.assertEqual(dbs[0].engine_config.shared_buffers_percentage, 41.5) + self.assertEqual(dbs[0].engine_config.work_mem, 4) + self.assertEqual( + 
dbs[0].engine_config.pg.autovacuum_analyze_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_analyze_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_max_workers, 10) + self.assertEqual(dbs[0].engine_config.pg.autovacuum_naptime, 100) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_delay, 50 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_cost_limit, 100 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_scale_factor, 0.5 + ) + self.assertEqual( + dbs[0].engine_config.pg.autovacuum_vacuum_threshold, 100 + ) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_delay, 200) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_flush_after, 512) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_maxpages, 100) + self.assertEqual(dbs[0].engine_config.pg.bgwriter_lru_multiplier, 2.0) + self.assertEqual(dbs[0].engine_config.pg.deadlock_timeout, 1000) + self.assertEqual( + dbs[0].engine_config.pg.default_toast_compression, "lz4" + ) + self.assertEqual( + dbs[0].engine_config.pg.idle_in_transaction_session_timeout, 100 + ) + self.assertTrue(dbs[0].engine_config.pg.jit) + self.assertEqual(dbs[0].engine_config.pg.max_files_per_process, 100) + self.assertEqual(dbs[0].engine_config.pg.max_locks_per_transaction, 100) + self.assertEqual( + dbs[0].engine_config.pg.max_logical_replication_workers, 32 + ) + self.assertEqual(dbs[0].engine_config.pg.max_parallel_workers, 64) + self.assertEqual( + dbs[0].engine_config.pg.max_parallel_workers_per_gather, 64 + ) + self.assertEqual( + dbs[0].engine_config.pg.max_pred_locks_per_transaction, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_replication_slots, 32) + self.assertEqual(dbs[0].engine_config.pg.max_slot_wal_keep_size, 100) + self.assertEqual(dbs[0].engine_config.pg.max_stack_depth, 3507152) + self.assertEqual( + dbs[0].engine_config.pg.max_standby_archive_delay, 1000 + ) + self.assertEqual( + 
dbs[0].engine_config.pg.max_standby_streaming_delay, 1000 + ) + self.assertEqual(dbs[0].engine_config.pg.max_wal_senders, 32) + self.assertEqual(dbs[0].engine_config.pg.max_worker_processes, 64) + self.assertEqual( + dbs[0].engine_config.pg.password_encryption, "scram-sha-256" + ) + self.assertEqual(dbs[0].engine_config.pg.pg_partman_bgw_interval, 3600) + self.assertEqual( + dbs[0].engine_config.pg.pg_partman_bgw_role, "myrolename" + ) + self.assertFalse( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_enable_query_plan + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_monitor_pgsm_max_buckets, 10 + ) + self.assertEqual( + dbs[0].engine_config.pg.pg_stat_statements_track, "top" + ) + self.assertEqual(dbs[0].engine_config.pg.temp_file_limit, 5000000) + self.assertEqual(dbs[0].engine_config.pg.timezone, "Europe/Helsinki") + self.assertEqual( + dbs[0].engine_config.pg.track_activity_query_size, 1024 + ) + self.assertEqual(dbs[0].engine_config.pg.track_commit_timestamp, "off") + self.assertEqual(dbs[0].engine_config.pg.track_functions, "all") + self.assertEqual(dbs[0].engine_config.pg.track_io_timing, "off") + self.assertEqual(dbs[0].engine_config.pg.wal_sender_timeout, 60000) + self.assertEqual(dbs[0].engine_config.pg.wal_writer_delay, 50) + def test_create(self): """ Test that PostgreSQL databases can be created @@ -336,6 +508,17 @@ def test_create(self): "postgresql/13.2", "g6-standard-1", cluster_size=3, + engine_config=PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5, + pg_partman_bgw_interval=3600, + pg_partman_bgw_role="myrolename", + pg_stat_monitor_pgsm_enable_query_plan=False, + pg_stat_monitor_pgsm_max_buckets=10, + pg_stat_statements_track="top", + ), + work_mem=4, + ), ) except Exception: pass @@ -347,6 +530,37 @@ def test_create(self): self.assertEqual(m.call_data["engine"], "postgresql/13.2") self.assertEqual(m.call_data["type"], "g6-standard-1") 
self.assertEqual(m.call_data["cluster_size"], 3) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "autovacuum_analyze_scale_factor" + ], + 0.5, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.interval"], + 3600, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_partman_bgw.role"], + "myrolename", + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_enable_query_plan" + ], + False, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "pg_stat_monitor.pgsm_max_buckets" + ], + 10, + ) + self.assertEqual( + m.call_data["engine_config"]["pg"]["pg_stat_statements.track"], + "top", + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) def test_update(self): """ @@ -361,6 +575,12 @@ def test_update(self): db.updates.day_of_week = 2 db.allow_list = new_allow_list db.label = "cool" + db.engine_config = PostgreSQLDatabaseConfigOptions( + pg=PostgreSQLDatabaseConfigPGOptions( + autovacuum_analyze_scale_factor=0.5 + ), + work_mem=4, + ) db.save() @@ -369,6 +589,13 @@ def test_update(self): self.assertEqual(m.call_data["label"], "cool") self.assertEqual(m.call_data["updates"]["day_of_week"], 2) self.assertEqual(m.call_data["allow_list"], new_allow_list) + self.assertEqual( + m.call_data["engine_config"]["pg"][ + "autovacuum_analyze_scale_factor" + ], + 0.5, + ) + self.assertEqual(m.call_data["engine_config"]["work_mem"], 4) def test_list_backups(self): """