PARAM
There are 260+ parameters in Pigsty describing all aspects of the deployment.
ID | Name | Section | Type | Level | Comment |
---|---|---|---|---|---|
101 | version | META | string | G | pigsty version string |
102 | admin_ip | META | ip | G | admin node ip address |
103 | region | META | enum | G | upstream mirror region: default,china,europe |
104 | proxy_env | META | dict | G | global proxy env when downloading packages |
105 | ca_method | CA | enum | G | create,recreate,copy, create by default |
106 | ca_cn | CA | string | G | ca common name, fixed as pigsty-ca |
107 | cert_validity | CA | interval | G | cert validity, 20 years by default |
108 | infra_seq | INFRA_ID | int | I | infra node identity, REQUIRED |
109 | infra_portal | INFRA_ID | dict | G | infra services exposed via portal |
110 | repo_enabled | REPO | bool | G/I | create a yum repo on this infra node? |
111 | repo_home | REPO | path | G | repo home dir, /www by default |
112 | repo_name | REPO | string | G | repo name, pigsty by default |
113 | repo_endpoint | REPO | url | G | access point to this repo by domain or ip:port |
114 | repo_remove | REPO | bool | G/A | remove existing upstream repo |
115 | repo_upstream | REPO | upstream[] | G | where to download upstream packages |
116 | repo_packages | REPO | string[] | G | which packages to be included |
117 | repo_url_packages | REPO | string[] | G | extra packages from url |
118 | infra_packages | INFRA_PACKAGE | string[] | G | packages to be installed on infra nodes |
119 | infra_packages_pip | INFRA_PACKAGE | string | G | pip installed packages for infra nodes |
120 | nginx_enabled | NGINX | bool | G/I | enable nginx on this infra node? |
121 | nginx_sslmode | NGINX | enum | G | nginx ssl mode? disable,enable,enforce |
122 | nginx_home | NGINX | path | G | nginx content dir, /www by default |
123 | nginx_port | NGINX | port | G | nginx listen port, 80 by default |
124 | nginx_ssl_port | NGINX | port | G | nginx ssl listen port, 443 by default |
125 | nginx_navbar | NGINX | index[] | G | nginx index page navigation links |
126 | dns_enabled | DNS | bool | G/I | setup dnsmasq on this infra node? |
127 | dns_port | DNS | port | G | dns server listen port, 53 by default |
128 | dns_records | DNS | string[] | G | dynamic dns records resolved by dnsmasq |
129 | prometheus_enabled | PROMETHEUS | bool | G/I | enable prometheus on this infra node? |
130 | prometheus_clean | PROMETHEUS | bool | G/A | clean prometheus data during init? |
131 | prometheus_data | PROMETHEUS | path | G | prometheus data dir, /data/prometheus by default |
132 | prometheus_sd_interval | PROMETHEUS | interval | G | prometheus target refresh interval, 5s by default |
133 | prometheus_scrape_interval | PROMETHEUS | interval | G | prometheus scrape & eval interval, 10s by default |
134 | prometheus_scrape_timeout | PROMETHEUS | interval | G | prometheus global scrape timeout, 8s by default |
135 | prometheus_options | PROMETHEUS | arg | G | prometheus extra server options |
136 | pushgateway_enabled | PROMETHEUS | bool | G/I | setup pushgateway on this infra node? |
137 | pushgateway_options | PROMETHEUS | arg | G | pushgateway extra server options |
138 | blackbox_enabled | PROMETHEUS | bool | G/I | setup blackbox_exporter on this infra node? |
139 | blackbox_options | PROMETHEUS | arg | G | blackbox_exporter extra server options |
140 | alertmanager_enabled | PROMETHEUS | bool | G/I | setup alertmanager on this infra node? |
141 | alertmanager_options | PROMETHEUS | arg | G | alertmanager extra server options |
142 | exporter_metrics_path | PROMETHEUS | path | G | exporter metric path, /metrics by default |
143 | exporter_install | PROMETHEUS | enum | G | how to install exporter? none,yum,binary |
144 | exporter_repo_url | PROMETHEUS | url | G | exporter repo file url if install exporter via yum |
145 | grafana_enabled | GRAFANA | bool | G/I | enable grafana on this infra node? |
146 | grafana_clean | GRAFANA | bool | G/A | clean grafana data during init? |
147 | grafana_admin_username | GRAFANA | username | G | grafana admin username, admin by default |
148 | grafana_admin_password | GRAFANA | password | G | grafana admin password, pigsty by default |
149 | grafana_plugin_cache | GRAFANA | path | G | path to grafana plugins cache tarball |
150 | grafana_plugin_list | GRAFANA | string[] | G | grafana plugins to be downloaded with grafana-cli |
151 | loki_enabled | LOKI | bool | G/I | enable loki on this infra node? |
152 | loki_clean | LOKI | bool | G/A | whether remove existing loki data? |
153 | loki_data | LOKI | path | G | loki data dir, /data/loki by default |
154 | loki_retention | LOKI | interval | G | loki log retention period, 15d by default |
201 | nodename | NODE_ID | string | I | node instance identity, use hostname if missing, optional |
202 | node_cluster | NODE_ID | string | C | node cluster identity, use 'nodes' if missing, optional |
203 | nodename_overwrite | NODE_ID | bool | C | overwrite node's hostname with nodename? |
204 | nodename_exchange | NODE_ID | bool | C | exchange nodename among play hosts? |
205 | node_id_from_pg | NODE_ID | bool | C | use postgres identity as node identity if applicable? |
206 | node_default_etc_hosts | NODE_DNS | string[] | G | static dns records in /etc/hosts |
207 | node_etc_hosts | NODE_DNS | string[] | C | extra static dns records in /etc/hosts |
208 | node_dns_method | NODE_DNS | enum | C | how to handle dns servers: add,none,overwrite |
209 | node_dns_servers | NODE_DNS | string[] | C | dynamic nameserver in /etc/resolv.conf |
210 | node_dns_options | NODE_DNS | string[] | C | dns resolv options in /etc/resolv.conf |
211 | node_repo_method | NODE_PACKAGE | enum | C | how to setup node repo: none,local,public |
212 | node_repo_remove | NODE_PACKAGE | bool | C | remove existing repo on node? |
213 | node_repo_local_urls | NODE_PACKAGE | string[] | C | local repo url, if node_repo_method = local |
214 | node_packages | NODE_PACKAGE | string[] | C | packages to be installed current nodes |
215 | node_default_packages | NODE_PACKAGE | string[] | G | default packages to be installed on all nodes |
216 | node_disable_firewall | NODE_TUNE | bool | C | disable node firewall? true by default |
217 | node_disable_selinux | NODE_TUNE | bool | C | disable node selinux? true by default |
218 | node_disable_numa | NODE_TUNE | bool | C | disable node numa, reboot required |
219 | node_disable_swap | NODE_TUNE | bool | C | disable node swap, use with caution |
220 | node_static_network | NODE_TUNE | bool | C | preserve dns resolver settings after reboot |
221 | node_disk_prefetch | NODE_TUNE | bool | C | setup disk prefetch on HDD to increase performance |
222 | node_kernel_modules | NODE_TUNE | string[] | C | kernel modules to be enabled on this node |
223 | node_hugepage_ratio | NODE_TUNE | float | C | node mem hugepage ratio, 0 disable it by default |
224 | node_tune | NODE_TUNE | enum | C | node tuned profile: none,oltp,olap,crit,tiny |
225 | node_sysctl_params | NODE_TUNE | dict | C | sysctl parameters in k:v format in addition to tuned |
226 | node_data | NODE_ADMIN | path | C | node main data directory, /data by default |
227 | node_admin_enabled | NODE_ADMIN | bool | C | create a admin user on target node? |
228 | node_admin_uid | NODE_ADMIN | int | C | uid and gid for node admin user |
229 | node_admin_username | NODE_ADMIN | username | C | name of node admin user, dba by default |
230 | node_admin_ssh_exchange | NODE_ADMIN | bool | C | exchange admin ssh key among node cluster |
231 | node_admin_pk_current | NODE_ADMIN | bool | C | add current user's ssh pk to admin authorized_keys |
232 | node_admin_pk_list | NODE_ADMIN | string[] | C | ssh public keys to be added to admin user |
233 | node_timezone | NODE_TIME | string | C | setup node timezone, empty string to skip |
234 | node_ntp_enabled | NODE_TIME | bool | C | enable chronyd time sync service? |
235 | node_ntp_servers | NODE_TIME | string[] | C | ntp servers in /etc/chrony.conf |
236 | node_crontab_overwrite | NODE_TIME | bool | C | overwrite or append to /etc/crontab ? |
237 | node_crontab | NODE_TIME | string[] | C | crontab entries in /etc/crontab |
260 | haproxy_enabled | HAPROXY | bool | C | enable haproxy on this node? |
261 | haproxy_clean | HAPROXY | bool | G/C/A | cleanup all existing haproxy config? |
262 | haproxy_reload | HAPROXY | bool | A | reload haproxy after config? |
263 | haproxy_auth_enabled | HAPROXY | bool | G | enable authentication for haproxy admin page |
264 | haproxy_admin_username | HAPROXY | username | G | haproxy admin username, admin by default |
265 | haproxy_admin_password | HAPROXY | password | G | haproxy admin password, pigsty by default |
266 | haproxy_exporter_port | HAPROXY | port | C | haproxy admin/exporter port, 9101 by default |
267 | haproxy_client_timeout | HAPROXY | interval | C | client side connection timeout, 24h by default |
268 | haproxy_server_timeout | HAPROXY | interval | C | server side connection timeout, 24h by default |
269 | haproxy_services | HAPROXY | service[] | C | list of haproxy service to be exposed on node |
271 | docker_enabled | DOCKER | bool | C | enable docker on this node? |
272 | docker_cgroups_driver | DOCKER | enum | C | docker cgroup fs driver: cgroupfs,systemd |
273 | docker_registry_mirrors | DOCKER | string[] | C | docker registry mirror list |
274 | docker_image_cache | DOCKER | path | C | docker image cache dir, /tmp/docker by default |
280 | node_exporter_enabled | NODE_EXPORTER | bool | C | setup node_exporter on this node? |
281 | node_exporter_port | NODE_EXPORTER | port | C | node exporter listen port, 9100 by default |
282 | node_exporter_options | NODE_EXPORTER | arg | C | extra server options for node_exporter |
283 | promtail_enabled | PROMTAIL | bool | C | enable promtail logging collector? |
284 | promtail_clean | PROMTAIL | bool | G/A | purge existing promtail status file during init? |
285 | promtail_port | PROMTAIL | port | C | promtail listen port, 9080 by default |
286 | promtail_positions | PROMTAIL | path | C | promtail position status file path |
301 | etcd_seq | ETCD | int | I | etcd instance identifier, REQUIRED |
302 | etcd_cluster | ETCD | string | C | etcd cluster & group name, etcd by default |
303 | etcd_safeguard | ETCD | bool | G/C/A | prevent purging running etcd instance? |
304 | etcd_clean | ETCD | bool | G/C/A | purging existing etcd during initialization? |
305 | etcd_data | ETCD | path | C | etcd data directory, /data/etcd by default |
306 | etcd_port | ETCD | port | C | etcd client port, 2379 by default |
307 | etcd_peer_port | ETCD | port | C | etcd peer port, 2380 by default |
308 | etcd_init | ETCD | enum | C | etcd initial cluster state, new or existing |
309 | etcd_election_timeout | ETCD | int | C | etcd election timeout, 1000ms by default |
310 | etcd_heartbeat_interval | ETCD | int | C | etcd heartbeat interval, 100ms by default |
401 | minio_seq | MINIO | int | I | minio instance identifier, REQUIRED |
402 | minio_cluster | MINIO | string | C | minio cluster name, minio by default |
403 | minio_clean | MINIO | bool | G/C/A | cleanup minio during init?, false by default |
404 | minio_user | MINIO | username | C | minio os user, minio by default |
405 | minio_node | MINIO | string | C | minio node name pattern |
406 | minio_data | MINIO | path | C | minio data dir(s), use {x...y} to specify multi drivers |
407 | minio_domain | MINIO | string | G | minio external domain name, sss.pigsty by default |
408 | minio_port | MINIO | port | C | minio service port, 9000 by default |
409 | minio_admin_port | MINIO | port | C | minio console port, 9001 by default |
410 | minio_access_key | MINIO | username | C | root access key, minioadmin by default |
411 | minio_secret_key | MINIO | password | C | root secret key, minioadmin by default |
412 | minio_extra_vars | MINIO | string | C | extra environment variables for minio server |
413 | minio_alias | MINIO | string | G | alias name for local minio deployment |
414 | minio_buckets | MINIO | bucket[] | C | list of minio bucket to be created |
415 | minio_users | MINIO | user[] | C | list of minio user to be created |
501 | pg_mode | PG_ID | enum | C | pgsql cluster mode: pgsql,citus,gpsql |
502 | pg_cluster | PG_ID | string | C | pgsql cluster name, REQUIRED identity parameter |
503 | pg_seq | PG_ID | int | I | pgsql instance seq number, REQUIRED identity parameter |
504 | pg_role | PG_ID | enum | I | pgsql role, REQUIRED, could be primary,replica,offline |
505 | pg_instances | PG_ID | dict | I | define multiple pg instances on node in {port:ins_vars} format |
506 | pg_upstream | PG_ID | ip | I | repl upstream ip addr for standby cluster or cascade replica |
507 | pg_shard | PG_ID | string | C | pgsql shard name, optional identity for sharding clusters |
508 | pg_group | PG_ID | int | C | pgsql shard index number, optional identity for sharding clusters |
509 | gp_role | PG_ID | enum | C | greenplum role of this cluster, could be master or segment |
510 | pg_exporters | PG_ID | dict | C | additional pg_exporters to monitor remote postgres instances |
511 | pg_offline_query | PG_ID | bool | G | set to true to enable offline query on this instance |
512 | pg_weight | PG_ID | int | G | relative load balance weight in service, 100 by default, 0-255 |
520 | pg_users | PG_BUSINESS | user[] | C | postgres business users |
521 | pg_databases | PG_BUSINESS | database[] | C | postgres business databases |
522 | pg_services | PG_BUSINESS | service[] | C | postgres business services |
523 | pg_hba_rules | PG_BUSINESS | hba[] | C | business hba rules for postgres |
524 | pgb_hba_rules | PG_BUSINESS | hba[] | C | business hba rules for pgbouncer |
530 | pg_replication_username | PG_BUSINESS | username | G | postgres replication username, replicator by default |
531 | pg_replication_password | PG_BUSINESS | password | G | postgres replication password, DBUser.Replicator by default |
532 | pg_admin_username | PG_BUSINESS | username | G | postgres admin username, dbuser_dba by default |
533 | pg_admin_password | PG_BUSINESS | password | G | postgres admin password in plain text, DBUser.DBA by default |
534 | pg_monitor_username | PG_BUSINESS | username | G | postgres monitor username, dbuser_monitor by default |
535 | pg_monitor_password | PG_BUSINESS | password | G | postgres monitor password, DBUser.Monitor by default |
540 | pg_dbsu | PG_INSTALL | username | C | os dbsu name, postgres by default, better not change it |
541 | pg_dbsu_uid | PG_INSTALL | int | C | os dbsu uid and gid, 26 for default postgres users and groups |
542 | pg_dbsu_sudo | PG_INSTALL | enum | C | dbsu sudo privilege, none,limit,all,nopass. limit by default |
543 | pg_dbsu_home | PG_INSTALL | path | C | postgresql home directory, /var/lib/pgsql by default |
544 | pg_dbsu_ssh_exchange | PG_INSTALL | bool | C | exchange postgres dbsu ssh key among same pgsql cluster |
545 | pg_version | PG_INSTALL | enum | C | postgres major version to be installed, 15 by default |
546 | pg_bin_dir | PG_INSTALL | path | C | postgres binary dir, /usr/pgsql/bin by default |
547 | pg_log_dir | PG_INSTALL | path | C | postgres log dir, /pg/log/postgres by default |
548 | pg_packages | PG_INSTALL | string[] | C | pg packages to be installed, ${pg_version} will be replaced |
549 | pg_extensions | PG_INSTALL | string[] | C | pg extensions to be installed, ${pg_version} will be replaced |
550 | pg_safeguard | PG_BOOTSTRAP | bool | G/C/A | prevent purging running postgres instance? false by default |
551 | pg_clean | PG_BOOTSTRAP | bool | G/C/A | purging existing postgres during pgsql init? true by default |
552 | pg_data | PG_BOOTSTRAP | path | C | postgres data directory, /pg/data by default |
553 | pg_fs_main | PG_BOOTSTRAP | path | C | mountpoint/path for postgres main data, /data by default |
554 | pg_fs_bkup | PG_BOOTSTRAP | path | C | mountpoint/path for pg backup data, /data/backup by default |
555 | pg_storage_type | PG_BOOTSTRAP | enum | C | storage type for pg main data, SSD,HDD, SSD by default |
556 | pg_dummy_filesize | PG_BOOTSTRAP | size | C | size of /pg/dummy , hold 64MB disk space for emergency use |
557 | pg_listen | PG_BOOTSTRAP | ip | C | postgres listen address, 0.0.0.0 (all ipv4 addr) by default |
558 | pg_port | PG_BOOTSTRAP | port | C | postgres listen port, 5432 by default |
559 | pg_localhost | PG_BOOTSTRAP | path | C | postgres unix socket dir for localhost connection |
560 | pg_namespace | PG_BOOTSTRAP | path | C | top level key namespace in etcd, used by patroni & vip |
561 | patroni_enabled | PG_BOOTSTRAP | bool | C | if disabled, no postgres cluster will be created during init |
562 | patroni_mode | PG_BOOTSTRAP | enum | C | patroni working mode: default,pause,remove |
563 | patroni_port | PG_BOOTSTRAP | port | C | patroni listen port, 8008 by default |
564 | patroni_log_dir | PG_BOOTSTRAP | path | C | patroni log dir, /pg/log/patroni by default |
565 | patroni_ssl_enabled | PG_BOOTSTRAP | bool | G | secure patroni RestAPI communications with SSL? |
566 | patroni_watchdog_mode | PG_BOOTSTRAP | enum | C | patroni watchdog mode: automatic,required,off. off by default |
567 | patroni_username | PG_BOOTSTRAP | username | C | patroni restapi username, postgres by default |
568 | patroni_password | PG_BOOTSTRAP | password | C | patroni restapi password, Patroni.API by default |
569 | patroni_citus_db | PG_BOOTSTRAP | string | C | citus database managed by patroni, postgres by default |
570 | pg_conf | PG_BOOTSTRAP | enum | C | config template: oltp,olap,crit,tiny. oltp.yml by default |
571 | pg_max_conn | PG_BOOTSTRAP | int | C | postgres max connections, auto will use recommended value |
572 | pg_shmem_ratio | PG_BOOTSTRAP | float | C | postgres shared memory ratio, 0.25 by default, 0.1~0.4 |
573 | pg_rto | PG_BOOTSTRAP | int | C | recovery time objective in seconds, 30s by default |
574 | pg_rpo | PG_BOOTSTRAP | int | C | recovery point objective in bytes, 1MiB at most by default |
575 | pg_libs | PG_BOOTSTRAP | string | C | preloaded libraries, pg_stat_statements,auto_explain by default |
576 | pg_delay | PG_BOOTSTRAP | interval | I | replication apply delay for standby cluster leader |
577 | pg_checksum | PG_BOOTSTRAP | bool | C | enable data checksum for postgres cluster? |
578 | pg_pwd_enc | PG_BOOTSTRAP | enum | C | passwords encryption algorithm: md5,scram-sha-256 |
579 | pg_encoding | PG_BOOTSTRAP | enum | C | database cluster encoding, UTF8 by default |
580 | pg_locale | PG_BOOTSTRAP | enum | C | database cluster local, C by default |
581 | pg_lc_collate | PG_BOOTSTRAP | enum | C | database cluster collate, C by default |
582 | pg_lc_ctype | PG_BOOTSTRAP | enum | C | database character type, en_US.UTF8 by default |
583 | pgbouncer_enabled | PG_BOOTSTRAP | bool | C | if disabled, pgbouncer will not be launched on pgsql host |
584 | pgbouncer_port | PG_BOOTSTRAP | port | C | pgbouncer listen port, 6432 by default |
585 | pgbouncer_log_dir | PG_BOOTSTRAP | path | C | pgbouncer log dir, /pg/log/pgbouncer by default |
586 | pgbouncer_auth_query | PG_BOOTSTRAP | bool | C | query postgres to retrieve unlisted business users? |
587 | pgbouncer_poolmode | PG_BOOTSTRAP | enum | C | pooling mode: transaction,session,statement, transaction by default |
588 | pgbouncer_sslmode | PG_BOOTSTRAP | enum | C | pgbouncer client ssl mode, disable by default |
600 | pg_provision | PG_PROVISION | bool | C | provision postgres cluster after bootstrap |
601 | pg_init | PG_PROVISION | string | G/C | provision init script for cluster template, pg-init by default |
602 | pg_default_roles | PG_PROVISION | role[] | G/C | default roles and users in postgres cluster |
603 | pg_default_privileges | PG_PROVISION | string[] | G/C | default privileges when created by admin user |
604 | pg_default_schemas | PG_PROVISION | string[] | G/C | default schemas to be created |
605 | pg_default_extensions | PG_PROVISION | extension[] | G/C | default extensions to be created |
606 | pg_reload | PG_PROVISION | bool | A | reload postgres after hba changes |
607 | pg_default_hba_rules | PG_PROVISION | hba[] | G/C | postgres default host-based authentication rules |
608 | pgb_default_hba_rules | PG_PROVISION | hba[] | G/C | pgbouncer default host-based authentication rules |
609 | pg_default_service_dest | PG_PROVISION | enum | G/C | default service destination if svc.dest='default' |
610 | pg_default_services | PG_PROVISION | service[] | G/C | postgres default service definitions |
620 | pgbackrest_enabled | PG_BACKUP | bool | C | enable pgbackrest on pgsql host? |
621 | pgbackrest_clean | PG_BACKUP | bool | C | remove pg backup data during init? |
622 | pgbackrest_log_dir | PG_BACKUP | path | C | pgbackrest log dir, /pg/log/pgbackrest by default |
623 | pgbackrest_method | PG_BACKUP | enum | C | pgbackrest repo method: local,minio,etc... |
624 | pgbackrest_repo | PG_BACKUP | dict | G/C | pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository |
630 | pg_vip_enabled | PG_VIP | bool | C | enable a l2 vip for pgsql primary? false by default |
631 | pg_vip_address | PG_VIP | cidr4 | C | vip address in <ipv4>/<mask> format, require if vip is enabled |
632 | pg_vip_interface | PG_VIP | string | C/I | vip network interface to listen, eth0 by default |
633 | pg_dns_suffix | PG_DNS | string | C | pgsql dns suffix, '' by default |
634 | pg_dns_target | PG_DNS | enum | C | auto, primary, vip, none, or ad hoc ip |
640 | pg_exporter_enabled | PG_EXPORTER | bool | C | enable pg_exporter on pgsql hosts? |
641 | pg_exporter_config | PG_EXPORTER | string | C | pg_exporter configuration file name |
642 | pg_exporter_cache_ttls | PG_EXPORTER | string | C | pg_exporter collector ttl stage in seconds, '1,10,60,300' by default |
643 | pg_exporter_port | PG_EXPORTER | port | C | pg_exporter listen port, 9630 by default |
644 | pg_exporter_params | PG_EXPORTER | string | C | extra url parameters for pg_exporter dsn |
645 | pg_exporter_url | PG_EXPORTER | pgurl | C | overwrite auto-generate pg dsn if specified |
646 | pg_exporter_auto_discovery | PG_EXPORTER | bool | C | enable auto database discovery? enabled by default |
647 | pg_exporter_exclude_database | PG_EXPORTER | string | C | csv of database that WILL NOT be monitored during auto-discovery |
648 | pg_exporter_include_database | PG_EXPORTER | string | C | csv of database that WILL BE monitored during auto-discovery |
649 | pg_exporter_connect_timeout | PG_EXPORTER | int | C | pg_exporter connect timeout in ms, 200 by default |
650 | pg_exporter_options | PG_EXPORTER | arg | C | overwrite extra options for pg_exporter |
651 | pgbouncer_exporter_enabled | PG_EXPORTER | bool | C | enable pgbouncer_exporter on pgsql hosts? |
652 | pgbouncer_exporter_port | PG_EXPORTER | port | C | pgbouncer_exporter listen port, 9631 by default |
653 | pgbouncer_exporter_url | PG_EXPORTER | pgurl | C | overwrite auto-generate pgbouncer dsn if specified |
654 | pgbouncer_exporter_options | PG_EXPORTER | arg | C | overwrite extra options for pgbouncer_exporter |
701 | redis_cluster | REDIS_ID | string | C | redis cluster name, required identity parameter |
702 | redis_instances | REDIS_ID | dict | I | redis instances definition on this redis node |
703 | redis_node | REDIS_ID | int | I | redis node sequence number, node int id required |
704 | redis_fs_main | REDIS_NODE | path | C | redis main data mountpoint, /data by default |
705 | redis_exporter_enabled | REDIS_NODE | bool | C | install redis exporter on redis nodes? |
706 | redis_exporter_port | REDIS_NODE | port | C | redis exporter listen port, 9121 by default |
707 | redis_exporter_options | REDIS_NODE | string | C/I | cli args and extra options for redis exporter |
708 | redis_safeguard | REDIS_PROVISION | bool | C | prevent purging running redis instance? |
709 | redis_clean | REDIS_PROVISION | bool | C | purging existing redis during init? |
710 | redis_rmdata | REDIS_PROVISION | bool | A | remove redis data when purging redis server? |
711 | redis_mode | REDIS_PROVISION | enum | C | redis mode: standalone,cluster,sentinel |
712 | redis_conf | REDIS_PROVISION | string | C | redis config template path, except sentinel |
713 | redis_bind_address | REDIS_PROVISION | ip | C | redis bind address, empty string will use host ip |
714 | redis_max_memory | REDIS_PROVISION | size | C/I | max memory used by each redis instance |
715 | redis_mem_policy | REDIS_PROVISION | enum | C | redis memory eviction policy |
716 | redis_password | REDIS_PROVISION | password | C | redis password, empty string will disable password |
717 | redis_rdb_save | REDIS_PROVISION | string[] | C | redis rdb save directives, disable with empty list |
718 | redis_aof_enabled | REDIS_PROVISION | bool | C | enable redis append only file? |
719 | redis_rename_commands | REDIS_PROVISION | dict | C | rename redis dangerous commands |
720 | redis_cluster_replicas | REDIS_PROVISION | int | C | replica number for one master in redis cluster |
Parameters about pigsty infrastructure components: local yum repo, nginx, dnsmasq, prometheus, grafana, loki, alertmanager, pushgateway, blackbox_exporter, etc...
This section contains some metadata of current pigsty deployments, such as version string, admin node IP address, repo mirror region
and http(s) proxy used when downloading packages.
version: v2.0.0-b6 # pigsty version string
admin_ip: 10.10.10.10 # admin node ip address
region: default # upstream mirror region: default,china,europe
proxy_env: # global proxy env when downloading packages
no_proxy: "localhost,127.0.0.1,10.0.0.0/8,192.168.0.0/16,*.pigsty,*.aliyun.com,mirrors.*,*.myqcloud.com,*.tsinghua.edu.cn"
# http_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
# https_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
# all_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
name: version
, type: string
, level: G
pigsty version string
default value: v2.0.0-b6
It will be used for pigsty introspection & content rendering.
name: admin_ip
, type: ip
, level: G
admin node ip address
default value: 10.10.10.10
Node with this ip address will be treated as admin node, usually point to the first node that install Pigsty.
The default value 10.10.10.10 is a placeholder which will be replaced during the configure procedure.
This parameter is referenced by many other parameters: the exact string ${admin_ip} in their values will be replaced with the actual admin_ip.
name: region
, type: enum
, level: G
upstream mirror region: default,china,europe
default value: default
If a region other than default is set, and there's a corresponding entry in repo_upstream.[repo].baseurl, it will be used instead of the default one.
For example, if china is used, pigsty will use the China mirrors designated in repo_upstream if applicable.
name: proxy_env
, type: dict
, level: G
global proxy env when downloading packages
default value:
proxy_env: # global proxy env when downloading packages
http_proxy: 'http://username:password@proxy.address.com'
https_proxy: 'http://username:password@proxy.address.com'
all_proxy: 'http://username:password@proxy.address.com'
no_proxy: "localhost,127.0.0.1,10.0.0.0/8,192.168.0.0/16,*.pigsty,*.aliyun.com,mirrors.aliyuncs.com,mirrors.tuna.tsinghua.edu.cn,mirrors.zju.edu.cn"
It's quite important to use http proxy in restricted production environment, or your Internet access is blocked (e.g. Mainland China)
Self-Signed CA used by pigsty. It is required to support advanced security features.
ca_method: create # create,recreate,copy, create by default
ca_cn: pigsty-ca # ca common name, fixed as pigsty-ca
cert_validity: 7300d # cert validity, 20 years by default
name: ca_method
, type: enum
, level: G
available options: create,recreate,copy
default value: create
- create: Create a new CA public-private key pair if not exists, use it if exists
- recreate: Always re-create a new CA public-private key pair
- copy: Copy the existing CA public and private keys from local files/pki/ca, abort if missing

If you already have a pair of ca.crt and ca.key, put them under files/pki/ca and set ca_method to copy.
name: ca_cn
, type: string
, level: G
ca common name, not recommending to change it.
default value: pigsty-ca
you can check that with openssl x509 -text -in /etc/pki/ca.crt
name: cert_validity
, type: interval
, level: G
cert validity, 20 years by default, which is enough for most scenarios
default value: 7300d
Infrastructure identity and portal definition.
#infra_seq: 1 # infra node identity, explicitly required
infra_portal: # infra services exposed via portal
home : { domain: h.pigsty }
grafana : { domain: g.pigsty ,endpoint: "${admin_ip}:3000" ,websocket: true }
prometheus : { domain: p.pigsty ,endpoint: "${admin_ip}:9090" }
alertmanager : { domain: a.pigsty ,endpoint: "${admin_ip}:9093" }
blackbox : { endpoint: "${admin_ip}:9115" }
loki : { endpoint: "${admin_ip}:3100" }
name: infra_seq
, type: int
, level: I
infra node identity, REQUIRED
no default value, you have to assign it explicitly.
name: infra_portal
, type: dict
, level: G
infra services exposed via portal
default value will expose home, grafana, prometheus, alertmanager via nginx with corresponding domain names.
infra_portal: # infra services exposed via portal
home : { domain: h.pigsty }
grafana : { domain: g.pigsty ,endpoint: "${admin_ip}:3000" ,websocket: true }
prometheus : { domain: p.pigsty ,endpoint: "${admin_ip}:9090" }
alertmanager : { domain: a.pigsty ,endpoint: "${admin_ip}:9093" }
blackbox : { endpoint: "${admin_ip}:9115" }
loki : { endpoint: "${admin_ip}:3100" }
Each record consists of a key and a value: the key is the component name, and the value contains two subsections — domain and endpoint — the external access domain name and the internal TCP endpoint, respectively.
The name
definition of the default record is fixed and referenced by other modules, so do not modify the default entry names.
The domain
is the domain name that should be used for external access to this upstream server. domain names will be added to Nginx SSL cert SAN.
The endpoint is an internally reachable TCP address (ip:port); ${admin_ip} will be replaced with the actual admin_ip at runtime.
This section is about local yum repo, which is used by all other modules.
Pigsty is installed on a meta node. Pigsty pulls up a local yum repo for the current environment to install RPM packages.
During initialization, Pigsty downloads all packages and their dependencies (specified by repo_packages
) from the Internet upstream repo (specified by repo_upstream
) to {{ nginx_home }}
/ {{ repo_name }}
(default is /www/pigsty
). The total size of all dependent software is about 1GB or so.
When creating a local yum repo, Pigsty will skip the software download phase if the directory already exists and if there is a marker file named repo_complete
in the dir.
If the download speed of some packages is too slow, you can set the download proxy to complete the first download by using the proxy_env
config entry or directly download the pre-packaged offline package.
The offline package is a zip archive of the {{ nginx_home }}/{{ repo_name }}
dir pkg.tgz
. During configure
, if Pigsty finds the offline package /tmp/pkg.tgz
, it will extract it to {{ nginx_home }}/{{ repo_name }}
, skipping the software download step during installation.
The default offline package is based on CentOS 7.9.2011 x86_64; if you use a different OS, there may be RPM package conflicts and dependency errors; please refer to the FAQ to solve them.
repo_enabled: true # create a yum repo on this infra node?
repo_home: /www # repo home dir, `/www` by default
repo_name: pigsty # repo name, pigsty by default
repo_endpoint: http://${admin_ip}:80 # access point to this repo by domain or ip:port
repo_remove: true # remove existing upstream repo
repo_upstream: # where to download #
- { name: base ,description: 'EL 7 Base' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/os/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/os/$basearch/' }}
- { name: updates ,description: 'EL 7 Updates' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/updates/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/updates/$basearch/' }}
- { name: extras ,description: 'EL 7 Extras' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/extras/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/extras/$basearch/' }}
- { name: epel ,description: 'EL 7 EPEL' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://download.fedoraproject.org/pub/epel/$releasever/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/epel/$releasever/$basearch/' , europe: 'https://mirrors.xtom.de/epel/$releasever/$basearch/' }}
- { name: centos-sclo ,description: 'EL 7 SCLo' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/sclo/$basearch/sclo/' , china: 'https://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/sclo/' , europe: 'https://mirrors.xtom.de/centos/$releasever/sclo/$basearch/sclo/' }}
- { name: centos-sclo-rh ,description: 'EL 7 SCLo rh' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/sclo/$basearch/rh/' , china: 'https://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/rh/' , europe: 'https://mirrors.xtom.de/centos/$releasever/sclo/$basearch/rh/' }}
- { name: baseos ,description: 'EL 8+ BaseOS' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/BaseOS/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/BaseOS/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/BaseOS/$basearch/os/' }}
- { name: appstream ,description: 'EL 8+ AppStream' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/AppStream/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/AppStream/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/AppStream/$basearch/os/' }}
- { name: extras ,description: 'EL 8+ Extras' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/extras/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/extras/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/extras/$basearch/os/' }}
- { name: epel ,description: 'EL 8+ EPEL' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'http://download.fedoraproject.org/pub/epel/$releasever/Everything/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/epel/$releasever/Everything/$basearch/' , europe: 'https://mirrors.xtom.de/epel/$releasever/Everything/$basearch/' }}
- { name: powertools ,description: 'EL 8 PowerTools' ,module: node ,releases: [ 8 ] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/PowerTools/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/PowerTools/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/PowerTools/$basearch/os/' }}
- { name: crb ,description: 'EL 9 CRB' ,module: node ,releases: [ 9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/CRB/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/CRB/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/CRB/$basearch/os/' }}
- { name: grafana ,description: 'Grafana' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://packages.grafana.com/oss/rpm' , china: 'https://mirrors.tuna.tsinghua.edu.cn/grafana/yum/rpm' }}
- { name: prometheus ,description: 'Prometheus' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://packagecloud.io/prometheus-rpm/release/el/$releasever/$basearch' }}
- { name: nginx ,description: 'Nginx Repo' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://nginx.org/packages/centos/$releasever/$basearch/' }}
- { name: docker-ce ,description: 'Docker CE' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://download.docker.com/linux/centos/$releasever/$basearch/stable' , china: 'https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/stable' , europe: 'https://mirrors.xtom.de/docker-ce/linux/centos/$releasever/$basearch/stable' }}
- { name: pgdg15 ,description: 'PostgreSQL 15' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/15/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/15/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/15/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-common ,description: 'PostgreSQL Common' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-extras ,description: 'PostgreSQL Extra' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-el8fix ,description: 'PostgreSQL EL8FIX' ,module: pgsql ,releases: [ 8 ] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' }}
- { name: timescaledb ,description: 'TimescaleDB' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://packagecloud.io/timescale/timescaledb/el/$releasever/$basearch' }}
- { name: citus ,description: 'Citus Community' ,module: pgsql ,releases: [7 ] ,baseurl: { default: 'https://repos.citusdata.com/community/el/$releasever/$basearch' }}
repo_packages: # which packages to be included
- grafana loki logcli promtail prometheus2 alertmanager pushgateway blackbox_exporter node_exporter redis_exporter
- nginx nginx_exporter wget createrepo_c sshpass ansible python3 python3-pip python3-requests python3-jmespath mtail dnsmasq docker-ce docker-compose-plugin etcd
- lz4 unzip bzip2 zlib yum pv jq git ncdu make patch bash lsof wget uuid tuned chrony perf flamegraph nvme-cli numactl grubby sysstat iotop htop modulemd-tools
- netcat socat rsync ftp lrzsz s3cmd net-tools tcpdump ipvsadm bind-utils telnet audit ca-certificates openssl openssh-clients readline vim-minimal haproxy redis
- postgresql15* postgis33_15* citus_15* pglogical_15* pg_squeeze_15* wal2json_15* pg_repack_15* timescaledb-2-postgresql-15* timescaledb-tools libuser openldap-compat
- patroni patroni-etcd pgbouncer pgbadger pgbackrest tail_n_mail pgloader pg_activity
- orafce_15* mysqlcompat_15 mongo_fdw_15* tds_fdw_15* mysql_fdw_15 hdfs_fdw_15 sqlite_fdw_15 pgbouncer_fdw_15 pg_dbms_job_15
- pg_stat_kcache_15* pg_stat_monitor_15* pg_qualstats_15 pg_track_settings_15 pg_wait_sampling_15 system_stats_15 logerrors_15 pg_top_15
- plprofiler_15* plproxy_15 plsh_15* pldebugger_15 plpgsql_check_15* pgtt_15 pgq_15* pgsql_tweaks_15 count_distinct_15 hypopg_15
- timestamp9_15* semver_15* prefix_15* rum_15 geoip_15 periods_15 ip4r_15 tdigest_15 hll_15 pgmp_15 extra_window_functions_15 topn_15
- pg_comparator_15 pg_ivm_15* pgsodium_15* pgfincore_15* ddlx_15 credcheck_15 postgresql_anonymizer_15* postgresql_faker_15 safeupdate_15
- pg_fkpart_15 pg_jobmon_15 pg_partman_15 pg_permissions_15 pgaudit17_15 pgexportdoc_15 pgimportdoc_15 pg_statement_rollback_15*
- pg_cron_15 pg_background_15 e-maj_15 pg_catcheck_15 pg_prioritize_15 pgcopydb_15 pg_filedump_15 pgcryptokey_15
repo_url_packages: # extra packages from url
- https://github.com/Vonng/pg_exporter/releases/download/v0.5.0/pg_exporter-0.5.0.x86_64.rpm
- https://github.com/cybertec-postgresql/vip-manager/releases/download/v2.1.0/vip-manager_2.1.0_Linux_x86_64.rpm
- https://github.com/dalibo/pev2/releases/download/v1.7.0/index.html
- https://dl.min.io/server/minio/release/linux-amd64/archive/minio-20230131022419.0.0.x86_64.rpm
- https://dl.min.io/client/mc/release/linux-amd64/archive/mcli-20230128202938.0.0.x86_64.rpm
name: repo_enabled
, type: bool
, level: G/I
create a yum repo on this infra node?
default value: true
If you have multiple infra nodes, you can disable yum repo on other standby nodes to reduce Internet traffic.
name: repo_home
, type: path
, level: G
repo home dir, /www
by default
default value: /www
name: repo_name
, type: string
, level: G
repo name, pigsty by default
default value: pigsty
name: repo_endpoint
, type: url
, level: G
access point to this repo by domain or ip:port
default value: http://${admin_ip}:80
name: repo_remove
, type: bool
, level: G/A
remove existing upstream repo
default value: true
name: repo_upstream
, type: upstream[]
, level: G
where to download upstream packages
default values:
repo_upstream: # where to download #
- { name: base ,description: 'EL 7 Base' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/os/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/os/$basearch/' }}
- { name: updates ,description: 'EL 7 Updates' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/updates/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/updates/$basearch/' }}
- { name: extras ,description: 'EL 7 Extras' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/extras/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/' , europe: 'https://mirrors.xtom.de/centos/$releasever/extras/$basearch/' }}
- { name: epel ,description: 'EL 7 EPEL' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://download.fedoraproject.org/pub/epel/$releasever/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/epel/$releasever/$basearch/' , europe: 'https://mirrors.xtom.de/epel/$releasever/$basearch/' }}
- { name: centos-sclo ,description: 'EL 7 SCLo' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/sclo/$basearch/sclo/' , china: 'https://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/sclo/' , europe: 'https://mirrors.xtom.de/centos/$releasever/sclo/$basearch/sclo/' }}
- { name: centos-sclo-rh ,description: 'EL 7 SCLo rh' ,module: node ,releases: [7 ] ,baseurl: { default: 'http://mirror.centos.org/centos/$releasever/sclo/$basearch/rh/' , china: 'https://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/rh/' , europe: 'https://mirrors.xtom.de/centos/$releasever/sclo/$basearch/rh/' }}
- { name: baseos ,description: 'EL 8+ BaseOS' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/BaseOS/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/BaseOS/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/BaseOS/$basearch/os/' }}
- { name: appstream ,description: 'EL 8+ AppStream' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/AppStream/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/AppStream/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/AppStream/$basearch/os/' }}
- { name: extras ,description: 'EL 8+ Extras' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/extras/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/extras/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/extras/$basearch/os/' }}
- { name: epel ,description: 'EL 8+ EPEL' ,module: node ,releases: [ 8,9] ,baseurl: { default: 'http://download.fedoraproject.org/pub/epel/$releasever/Everything/$basearch/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/epel/$releasever/Everything/$basearch/' , europe: 'https://mirrors.xtom.de/epel/$releasever/Everything/$basearch/' }}
- { name: powertools ,description: 'EL 8 PowerTools' ,module: node ,releases: [ 8 ] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/PowerTools/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/PowerTools/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/PowerTools/$basearch/os/' }}
- { name: crb ,description: 'EL 9 CRB' ,module: node ,releases: [ 9] ,baseurl: { default: 'https://dl.rockylinux.org/pub/rocky/$releasever/CRB/$basearch/os/' , china: 'https://mirrors.aliyun.com/rockylinux/$releasever/CRB/$basearch/os/' , europe: 'https://mirrors.xtom.de/rocky/$releasever/CRB/$basearch/os/' }}
- { name: grafana ,description: 'Grafana' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://packages.grafana.com/oss/rpm' , china: 'https://mirrors.tuna.tsinghua.edu.cn/grafana/yum/rpm' }}
- { name: prometheus ,description: 'Prometheus' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://packagecloud.io/prometheus-rpm/release/el/$releasever/$basearch' }}
- { name: nginx ,description: 'Nginx Repo' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://nginx.org/packages/centos/$releasever/$basearch/' }}
- { name: docker-ce ,description: 'Docker CE' ,module: infra ,releases: [7,8,9] ,baseurl: { default: 'https://download.docker.com/linux/centos/$releasever/$basearch/stable' , china: 'https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/$releasever/$basearch/stable' , europe: 'https://mirrors.xtom.de/docker-ce/linux/centos/$releasever/$basearch/stable' }}
- { name: pgdg15 ,description: 'PostgreSQL 15' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/15/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/15/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/15/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-common ,description: 'PostgreSQL Common' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-extras ,description: 'PostgreSQL Extra' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/pgdg-rhel$releasever-extras/redhat/rhel-$releasever-$basearch' }}
- { name: pgdg-el8fix ,description: 'PostgreSQL EL8FIX' ,module: pgsql ,releases: [ 8 ] ,baseurl: { default: 'https://download.postgresql.org/pub/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' , china: 'https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' , europe: 'https://mirrors.xtom.de/postgresql/repos/yum/common/pgdg-centos8-sysupdates/redhat/rhel-8-x86_64/' }}
- { name: timescaledb ,description: 'TimescaleDB' ,module: pgsql ,releases: [7,8,9] ,baseurl: { default: 'https://packagecloud.io/timescale/timescaledb/el/$releasever/$basearch' }}
- { name: citus ,description: 'Citus Community' ,module: pgsql ,releases: [7 ] ,baseurl: { default: 'https://repos.citusdata.com/community/el/$releasever/$basearch' }}
name: repo_packages
, type: string[]
, level: G
which packages to be included
default values:
repo_packages: # which packages to be included
- grafana loki logcli promtail prometheus2 alertmanager pushgateway blackbox_exporter node_exporter redis_exporter
- nginx nginx_exporter wget createrepo_c sshpass ansible python3 python3-pip python3-requests python3-jmespath mtail dnsmasq docker-ce docker-compose-plugin etcd
- lz4 unzip bzip2 zlib yum pv jq git ncdu make patch bash lsof wget uuid tuned chrony perf flamegraph nvme-cli numactl grubby sysstat iotop htop modulemd-tools
- netcat socat rsync ftp lrzsz s3cmd net-tools tcpdump ipvsadm bind-utils telnet audit ca-certificates openssl openssh-clients readline vim-minimal haproxy redis
- postgresql15* postgis33_15* citus_15* pglogical_15* pg_squeeze_15* wal2json_15* pg_repack_15* timescaledb-2-postgresql-15* timescaledb-tools libuser openldap-compat
- patroni patroni-etcd pgbouncer pgbadger pgbackrest tail_n_mail pgloader pg_activity
- orafce_15* mysqlcompat_15 mongo_fdw_15* tds_fdw_15* mysql_fdw_15 hdfs_fdw_15 sqlite_fdw_15 pgbouncer_fdw_15 pg_dbms_job_15
- pg_stat_kcache_15* pg_stat_monitor_15* pg_qualstats_15 pg_track_settings_15 pg_wait_sampling_15 system_stats_15 logerrors_15 pg_top_15
- plprofiler_15* plproxy_15 plsh_15* pldebugger_15 plpgsql_check_15* pgtt_15 pgq_15* pgsql_tweaks_15 count_distinct_15 hypopg_15
- timestamp9_15* semver_15* prefix_15* rum_15 geoip_15 periods_15 ip4r_15 tdigest_15 hll_15 pgmp_15 extra_window_functions_15 topn_15
- pg_comparator_15 pg_ivm_15* pgsodium_15* pgfincore_15* ddlx_15 credcheck_15 postgresql_anonymizer_15* postgresql_faker_15 safeupdate_15
- pg_fkpart_15 pg_jobmon_15 pg_partman_15 pg_permissions_15 pgaudit17_15 pgexportdoc_15 pgimportdoc_15 pg_statement_rollback_15*
- pg_cron_15 pg_background_15 e-maj_15 pg_catcheck_15 pg_prioritize_15 pgcopydb_15 pg_filedump_15 pgcryptokey_15
Each line is a set of package names separated by spaces, where the specified software will be downloaded via repotrack
.
EL7, 8, 9 packages are slightly different, here are some ad hoc packages:
- EL7: docker-compose citus111_15*
- EL8: modulemd-tools python38-jmespath haproxy redis docker-compose-plugin citus_15* flamegraph
- EL9: modulemd-tools python3-jmespath haproxy redis docker-compose-plugin citus_15* flamegraph libuser openldap-compat
name: repo_url_packages
, type: string[]
, level: G
extra packages from url
default value:
repo_url_packages: # extra packages from url
- https://github.com/Vonng/pg_exporter/releases/download/v0.5.0/pg_exporter-0.5.0.x86_64.rpm
- https://github.com/cybertec-postgresql/vip-manager/releases/download/v2.1.0/vip-manager_2.1.0_Linux_x86_64.rpm
- https://github.com/dalibo/pev2/releases/download/v1.7.0/index.html
- https://dl.min.io/server/minio/release/linux-amd64/archive/minio-20230131022419.0.0.x86_64.rpm
- https://dl.min.io/client/mc/release/linux-amd64/archive/mcli-20230128202938.0.0.x86_64.rpm
Currently, these packages are downloaded via url rather than upstream yum repo
- pg_exporter : Required, core component of the monitoring system.
- vip-manager : Required, package needed to enable L2 VIP for managing VIP.
- pev2 : Optional, PostgreSQL execution plan visualizer.
- minio/mcli : Optional, set up minio clusters for the PostgreSQL backup center.
There are two missing packages in EL7: haproxy & redis:
- https://github.com/Vonng/pigsty-pkg/releases/download/misc/redis-6.2.7-1.el7.remi.x86_64.rpm # redis.el7
- https://github.com/Vonng/haproxy-rpm/releases/download/v2.7.2/haproxy-2.7.2-1.el7.x86_64.rpm # haproxy.el7
These packages are installed on infra nodes only, including common rpm packages and pip packages.
infra_packages: # packages to be installed on infra nodes
- grafana,loki,prometheus2,alertmanager,pushgateway,blackbox_exporter,nginx_exporter,redis_exporter,pg_exporter
- nginx,ansible,python3-requests,redis,mcli,logcli,postgresql15
infra_packages_pip: '' # pip installed packages for infra nodes
name: infra_packages
, type: string[]
, level: G
packages to be installed on infra nodes
default value:
infra_packages: # packages to be installed on infra nodes
- grafana,loki,prometheus2,alertmanager,pushgateway,blackbox_exporter,nginx_exporter,redis_exporter,pg_exporter
- nginx,ansible,python3-requests,redis,mcli,logcli,postgresql15
name: infra_packages_pip
, type: string
, level: G
pip installed packages for infra nodes
default value is empty string
Pigsty exposes all Web services through Nginx: Home Page, Grafana, Prometheus, AlertManager, etc...,
and other optional tools such as PGWeb, Jupyter Lab, Pgadmin, Bytebase, and other static resources & reports such as pev, schemaspy & pgbadger.
This nginx also serves as a local yum repo.
nginx_enabled: true # enable nginx on this infra node?
nginx_sslmode: enable # nginx ssl mode? disable,enable,enforce
nginx_home: /www # nginx content dir, `/www` by default
nginx_port: 80 # nginx listen port, 80 by default
nginx_ssl_port: 443 # nginx ssl listen port, 443 by default
nginx_navbar: # nginx index page navigation links
- { name: CA Cert ,url: '/ca.crt' ,desc: 'pigsty self-signed ca.crt' }
- { name: Package ,url: '/pigsty' ,desc: 'local yum repo packages' }
- { name: Explain ,url: '/pev.html' ,desc: 'postgres explain visualizer' }
- { name: PG Logs ,url: '/logs' ,desc: 'postgres raw csv logs' }
- { name: Reports ,url: '/report' ,desc: 'pgbadger summary report' }
name: nginx_enabled
, type: bool
, level: G/I
enable nginx on this infra node?
default value: true
name: nginx_sslmode
, type: enum
, level: G
nginx ssl mode? disable,enable,enforce
default value: enable
- disable : listen on default port only
- enable : serve both http / https requests
- enforce : all links are rendered as https://
name: nginx_home
, type: path
, level: G
nginx content dir, /www
by default
default value: /www
Nginx root directory which contains static resource and repo resource. It's wise to set this value same as repo_home
so that local repo content is automatically served.
name: nginx_port
, type: port
, level: G
nginx listen port, 80 by default
default value: 80
name: nginx_ssl_port
, type: port
, level: G
nginx ssl listen port, 443 by default
default value: 443
name: nginx_navbar
, type: index[]
, level: G
nginx index page navigation links
default value:
nginx_navbar: # nginx index page navigation links
- { name: CA Cert ,url: '/ca.crt' ,desc: 'pigsty self-signed ca.crt' }
- { name: Package ,url: '/pigsty' ,desc: 'local yum repo packages' }
- { name: Explain ,url: '/pev.html' ,desc: 'postgres explain visualizer' }
- { name: PG Logs ,url: '/logs' ,desc: 'postgres raw csv logs' }
- { name: Reports ,url: '/report' ,desc: 'pgbadger summary report' }
Each record is rendered as a navigation link to the Pigsty home page App drop-down menu, and the apps are all optional, mounted by default on the Pigsty default server under http://pigsty/
.
The url
parameter specifies the URL PATH for the app, with the exception that if the ${grafana}
string is present in the URL, it will be automatically replaced with the Grafana domain name defined in infra_portal
.
You can set a default DNSMASQ server on infra nodes to serve DNS inquiry.
All records on infra node's /etc/hosts.d/*
will be resolved.
You have to add nameserver {{ admin_ip }}
to your /etc/resolv.conf
to use this dns server
For pigsty managed node, the default "${admin_ip}"
in node_dns_servers
will do the trick.
dns_enabled: true # setup dnsmasq on this infra node?
dns_port: 53 # dns server listen port, 53 by default
dns_records: # dynamic dns records resolved by dnsmasq
- "${admin_ip} h.pigsty a.pigsty p.pigsty g.pigsty"
- "${admin_ip} api.pigsty adm.pigsty cli.pigsty ddl.pigsty lab.pigsty git.pigsty sss.pigsty"
name: dns_enabled
, type: bool
, level: G/I
setup dnsmasq on this infra node?
default value: true
name: dns_port
, type: port
, level: G
dns server listen port, 53 by default
default value: 53
name: dns_records
, type: string[]
, level: G
dynamic dns records resolved by dnsmasq
default value:
dns_records: # dynamic dns records resolved by dnsmasq
- "${admin_ip} h.pigsty a.pigsty p.pigsty g.pigsty"
- "${admin_ip} api.pigsty adm.pigsty cli.pigsty ddl.pigsty lab.pigsty git.pigsty sss.pigsty"
These auxiliary domain names will be written to /etc/hosts.d/default
Prometheus is used as time-series database for metrics scrape, storage & analysis.
prometheus_enabled: true # enable prometheus on this infra node?
prometheus_clean: true # clean prometheus data during init?
prometheus_data: /data/prometheus # prometheus data dir, `/data/prometheus` by default
prometheus_sd_interval: 5s # prometheus target refresh interval, 5s by default
prometheus_scrape_interval: 10s # prometheus scrape & eval interval, 10s by default
prometheus_scrape_timeout: 8s # prometheus global scrape timeout, 8s by default
prometheus_options: '--storage.tsdb.retention.time=15d' # prometheus extra server options
pushgateway_enabled: true # setup pushgateway on this infra node?
pushgateway_options: '--persistence.interval=1m' # pushgateway extra server options
blackbox_enabled: true # setup blackbox_exporter on this infra node?
blackbox_options: '' # blackbox_exporter extra server options
alertmanager_enabled: true # setup alertmanager on this infra node?
alertmanager_options: '' # alertmanager extra server options
exporter_metrics_path: /metrics # exporter metric path, `/metrics` by default
exporter_install: none # how to install exporter? none,yum,binary
exporter_repo_url: '' # exporter repo file url if install exporter via yum
name: prometheus_enabled
, type: bool
, level: G/I
enable prometheus on this infra node?
default value: true
name: prometheus_clean
, type: bool
, level: G/A
clean prometheus data during init?
default value: true
name: prometheus_data
, type: path
, level: G
prometheus data dir, /data/prometheus
by default
default value: /data/prometheus
name: prometheus_sd_interval
, type: interval
, level: G
prometheus target refresh interval, 5s by default
default value: 5s
name: prometheus_scrape_interval
, type: interval
, level: G
prometheus scrape & eval interval, 10s by default
default value: 10s
name: prometheus_scrape_timeout
, type: interval
, level: G
prometheus global scrape timeout, 8s by default
default value: 8s
DO NOT set this larger than prometheus_scrape_interval
name: prometheus_options
, type: arg
, level: G
prometheus extra server options
default value: --storage.tsdb.retention.time=15d
Extra cli args for prometheus server, the default value will set up a 15-day data retention to limit disk usage.
name: pushgateway_enabled
, type: bool
, level: G/I
setup pushgateway on this infra node?
default value: true
name: pushgateway_options
, type: arg
, level: G
pushgateway extra server options
default value: --persistence.interval=1m
name: blackbox_enabled
, type: bool
, level: G/I
setup blackbox_exporter on this infra node?
default value: true
name: blackbox_options
, type: arg
, level: G
blackbox_exporter extra server options
default value is empty string
name: alertmanager_enabled
, type: bool
, level: G/I
setup alertmanager on this infra node?
default value: true
name: alertmanager_options
, type: arg
, level: G
alertmanager extra server options
default value is empty string
name: exporter_metrics_path
, type: path
, level: G
exporter metric path, /metrics
by default
default value: /metrics
name: exporter_install
, type: enum
, level: G
how to install exporter? none,yum,binary
default value: none
Specify how to install Exporter:
- none : No installation (by default, the Exporter has been previously installed by the node.pkgs task)
- yum : Install using yum (if yum installation is enabled, run yum to install node_exporter and pg_exporter before deploying Exporter)
- binary : Install using a copied binary (copy node_exporter and pg_exporter binaries directly from the meta node, not recommended)
When installing with yum
, if exporter_repo_url
is specified (not empty), the installation will first install the REPO file under that URL into /etc/yum.repos.d
. This feature allows you to install Exporter directly without initializing the node infrastructure.
It is not recommended for regular users to use binary
installation. This mode is usually used for emergency troubleshooting and temporary problem fixes.
<meta>:<pigsty>/files/node_exporter -> <target>:/usr/bin/node_exporter
<meta>:<pigsty>/files/pg_exporter -> <target>:/usr/bin/pg_exporter
name: exporter_repo_url
, type: url
, level: G
exporter repo file url if install exporter via yum
default value is empty string
Default is empty; when exporter_install
is yum
, the repo specified by this parameter will be added to the node source list.
Grafana is the visualization platform for Pigsty's monitoring system.
It can also be used as a low code data visualization environment
grafana_enabled: true # enable grafana on this infra node?
grafana_clean: true # clean grafana data during init?
grafana_admin_username: admin # grafana admin username, `admin` by default
grafana_admin_password: pigsty # grafana admin password, `pigsty` by default
grafana_plugin_cache: /www/pigsty/plugins.tgz # path to grafana plugins cache tarball
grafana_plugin_list: # grafana plugins to be downloaded with grafana-cli
- volkovlabs-echarts-panel
- marcusolsson-treemap-panel
loki_enabled: true # enable loki on this infra node?
loki_clean: false # whether remove existing loki data?
loki_data: /data/loki # loki data dir, `/data/loki` by default
loki_retention: 15d # loki log retention period, 15d by default
name: grafana_enabled
, type: bool
, level: G/I
enable grafana on this infra node?
default value: true
name: grafana_clean
, type: bool
, level: G/A
clean grafana data during init?
default value: true
name: grafana_admin_username
, type: username
, level: G
grafana admin username, admin
by default
default value: admin
name: grafana_admin_password
, type: password
, level: G
grafana admin password, pigsty
by default
default value: pigsty
name: grafana_plugin_cache
, type: path
, level: G
path to grafana plugins cache tarball
default value: /www/pigsty/plugins.tgz
If that cache exists, pigsty use that instead of downloading plugins from the Internet
name: grafana_plugin_list
, type: string[]
, level: G
grafana plugins to be downloaded with grafana-cli
default value:
["volkovlabs-echarts-panel", "marcusolsson-treemap-panel"]
name: loki_enabled
, type: bool
, level: G/I
enable loki on this infra node?
default value: true
name: loki_clean
, type: bool
, level: G/A
whether remove existing loki data?
default value: false
name: loki_data
, type: path
, level: G
loki data dir
default value: /data/loki
name: loki_retention
, type: interval
, level: G
loki log retention period, 15d by default
default value: 15d
The NODE module tunes target nodes into the desired state and incorporates them into the Pigsty monitoring system.
Each node has identity parameters that are configured through the parameters in <cluster>.hosts
and <cluster>.vars
.
Pigsty uses IP as a unique identifier for database nodes. This IP must be the IP that the database instance listens to and serves externally, But it would be inappropriate to use a public IP address!
This is very important. The IP is the inventory_hostname
of the host in the inventory, which is reflected as the key
in the <cluster>.hosts
object.
You can use ansible_*
parameters to overwrite ssh
behavior, e.g. connect via domain name / alias, but the primary IPv4 is still the core identity of the node.
nodename
and node_cluster
are not mandatory; nodename
will use the node's current hostname by default, while node_cluster
will use the fixed default value: nodes
.
If node_id_from_pg
is enabled, the node will borrow PGSQL
identity and use it as Node's identity, i.e. node_cluster
is set to pg_cluster
if applicable, and nodename
is set to ${pg_cluster}-${pg_seq}
. If nodename_overwrite
is enabled, node's hostname will be overwritten by nodename
Pigsty labels a node with identity parameters in the monitoring system. Which maps nodename
to ins
, and node_cluster
into cls
.
Name | Type | Level | Necessity | Comment |
---|---|---|---|---|
inventory_hostname |
ip |
- | Required | Node IP |
nodename |
string |
I | Optional | Node Name |
node_cluster |
string |
C | Optional | Node cluster name |
The following cluster config declares a three-node node cluster:
node-test:
hosts:
10.10.10.11: { nodename: node-test-1 }
10.10.10.12: { nodename: node-test-2 }
10.10.10.13: { nodename: node-test-3 }
vars:
node_cluster: node-test
Default values:
#nodename: # [INSTANCE] # node instance identity, use hostname if missing, optional
node_cluster: nodes # [CLUSTER] # node cluster identity, use 'nodes' if missing, optional
nodename_overwrite: true # overwrite node's hostname with nodename?
nodename_exchange: false # exchange nodename among play hosts?
node_id_from_pg: true # use postgres identity as node identity if applicable?
name: nodename
, type: string
, level: I
node instance identity, use hostname if missing, optional
no default value, Null or empty string means nodename
will be set to node's current hostname.
If node_id_from_pg
is true
, nodename
will try to use ${pg_cluster}-${pg_seq}
first, if PGSQL is not defined on this node, it will fall back to default HOSTNAME
.
If nodename_overwrite
is true
, the node name will also be used as the HOSTNAME.
name: node_cluster
, type: string
, level: C
node cluster identity, use 'nodes' if missing, optional
default values: nodes
If node_id_from_pg
is true
, node_cluster
will try to use pg_cluster
first; if PGSQL is not defined on this node, it will fall back to the default value nodes
.
name: nodename_overwrite
, type: bool
, level: C
overwrite node's hostname with nodename?
default value is true
, a non-empty node name nodename
will override the hostname of the current node.
No changes are made to the hostname if the nodename
parameter is undefined, empty, or an empty string.
name: nodename_exchange
, type: bool
, level: C
exchange nodename among play hosts?
default value is false
When this parameter is enabled, node names are exchanged between the same group of nodes executing the node.yml
playbook, written to /etc/hosts
.
name: node_id_from_pg
, type: bool
, level: C
use postgres identity as node identity if applicable?
default value is true
Borrow PostgreSQL cluster & instance identity if applicable.
It's useful to use the same identity for postgres & node if there's a 1:1 relationship
Pigsty configs static DNS records and dynamic DNS resolver for nodes.
If you already have a DNS server, set node_dns_method
to none
to disable dynamic DNS setup.
\
node_default_etc_hosts: # static dns records in `/etc/hosts`
- "${admin_ip} h.pigsty a.pigsty p.pigsty g.pigsty"
node_etc_hosts: [] # extra static dns records in `/etc/hosts`
node_dns_method: add # how to handle dns servers: add,none,overwrite
node_dns_servers: ['${admin_ip}'] # dynamic nameserver in `/etc/resolv.conf`
node_dns_options: # dns resolv options in `/etc/resolv.conf`
- options single-request-reopen timeout:1
name: node_default_etc_hosts
, type: string[]
, level: G
static dns records in /etc/hosts
default value:
["${admin_ip} h.pigsty a.pigsty p.pigsty g.pigsty"]
node_default_etc_hosts
is an array. Each element is a DNS record with format <ip> <name>
.
It is used for global static DNS records. You can use node_etc_hosts
for ad hoc records for each cluster.
Make sure to write a DNS record like 10.10.10.10 h.pigsty a.pigsty p.pigsty g.pigsty
to /etc/hosts
to ensure that the local yum repo can be accessed using the domain name before the DNS Nameserver starts.
name: node_etc_hosts
, type: string[]
, level: C
extra static dns records in /etc/hosts
default values: []
Same as node_default_etc_hosts
, but in addition to it.
name: node_dns_method
, type: enum
, level: C
how to handle dns servers: add,none,overwrite
default values: add
- add: Append the records in node_dns_servers to /etc/resolv.conf and keep the existing DNS servers. (default)
- overwrite: Overwrite /etc/resolv.conf with the records in node_dns_servers
- none: If a DNS server is provided in the production env, the DNS server config can be skipped.
name: node_dns_servers
, type: string[]
, level: C
dynamic nameserver in /etc/resolv.conf
default values: ["${admin_ip}"]
Default name server ip address to be added to /etc/resolv.conf
name: node_dns_options
, type: string[]
, level: C
dns resolv options in /etc/resolv.conf
default value:
["options single-request-reopen timeout:1"]
This section is about upstream yum repos & packages to be installed.
node_repo_method: local # how to setup node repo: none,local,public
node_repo_remove: true # remove existing repo on node?
node_repo_local_urls: # local repo url, if node_repo_method = local
- http://${admin_ip}/pigsty.repo
node_packages: [ ] # packages to be installed current nodes
node_default_packages: # default packages to be installed on all nodes
- lz4,unzip,bzip2,zlib,yum,pv,jq,git,ncdu,make,patch,bash,lsof,wget,uuid,tuned,chrony,perf,nvme-cli,numactl,grubby,sysstat,iotop,htop,yum,yum-utils
- wget,netcat,socat,rsync,ftp,lrzsz,s3cmd,net-tools,tcpdump,ipvsadm,bind-utils,telnet,dnsmasq,audit,ca-certificates,openssl,openssh-clients,readline,vim-minimal
- node_exporter,etcd,mtail,python3-idna,python3-requests,haproxy
name: node_repo_method
, type: enum
, level: C
how to setup node repo: none,local,public
default values: local
- local: Use the local yum repo on the meta node, the default behavior (recommended).
- public: To install using internet sources, write the public repos in repo_upstream to /etc/yum.repos.d/.
- none: No config and modification of local repos.
name: node_repo_remove
, type: bool
, level: C
remove existing repo on node?
default value is true
, and thus Pigsty will move existing repo file in /etc/yum.repos.d
to backup dir: /etc/yum.repos.d/backup
before adding upstream repos
name: node_repo_local_urls
, type: string[]
, level: C
local repo url, if node_repo_method = local
default values: ["http://${admin_ip}/pigsty.repo"]
When node_repo_method
= local
, the Repo file URLs listed here will be downloaded to /etc/yum.repos.d
.
name: node_packages
, type: string[]
, level: C
packages to be installed current nodes
default values: []
Like node_default_packages
, but in addition to it. Designed for overwriting at the cluster/instance level.
name: node_default_packages
, type: string[]
, level: G
default packages to be installed on all nodes
default value:
node_default_packages: # default packages to be installed on all nodes
- lz4,unzip,bzip2,zlib,yum,pv,jq,git,ncdu,make,patch,bash,lsof,wget,uuid,tuned,chrony,perf,nvme-cli,numactl,grubby,sysstat,iotop,htop,yum,yum-utils
- wget,netcat,socat,rsync,ftp,lrzsz,s3cmd,net-tools,tcpdump,ipvsadm,bind-utils,telnet,dnsmasq,audit,ca-certificates,openssl,openssh-clients,readline,vim-minimal
- node_exporter,etcd,mtail,python3-idna,python3-requests,haproxy
Configure tuned templates, features, kernel modules, sysctl params on node.
node_disable_firewall: true # disable node firewall? true by default
node_disable_selinux: true # disable node selinux? true by default
node_disable_numa: false # disable node numa, reboot required
node_disable_swap: false # disable node swap, use with caution
node_static_network: true # preserve dns resolver settings after reboot
node_disk_prefetch: false # setup disk prefetch on HDD to increase performance
node_kernel_modules: [ softdog, br_netfilter, ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh ]
node_hugepage_ratio: 0 # node mem hugepage ratio, 0 disable it by default
node_tune: oltp # node tuned profile: none,oltp,olap,crit,tiny
node_sysctl_params: { } # sysctl parameters in k:v format in addition to tuned
name: node_disable_firewall
, type: bool
, level: C
disable node firewall? true by default
default value is true
name: node_disable_selinux
, type: bool
, level: C
disable node selinux? true by default
default value is true
name: node_disable_numa
, type: bool
, level: C
disable node numa, reboot required
default value is false
Boolean flag, disabled by default, i.e. NUMA is not turned off. Note that turning off NUMA requires a reboot of the machine before it can take effect!
If you don't know how to set the CPU affinity, it is recommended to turn off NUMA.
name: node_disable_swap
, type: bool
, level: C
disable node swap, use with caution
default value is false
Turning off SWAP is generally not recommended, but SWAP should be disabled when your node is used for a Kubernetes deployment.
If there is enough memory and the database is deployed exclusively, disabling it may slightly improve performance.
name: node_static_network
, type: bool
, level: C
preserve dns resolver settings after reboot
default value is true
Enabling static networking means that machine reboots will not overwrite your DNS Resolv config with NIC changes. It is recommended to enable it in production nodes.
name: node_disk_prefetch
, type: bool
, level: C
setup disk prefetch on HDD to increase performance
default value is false
Consider enable this when using HDD.
name: node_kernel_modules
, type: string[]
, level: C
kernel modules to be enabled on this node
default value:
node_kernel_modules: [ softdog, br_netfilter, ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh ]
An array consisting of kernel module names declaring the kernel modules that need to be installed on the node.
name: node_hugepage_ratio
, type: float
, level: C
node mem hugepage ratio, 0 disable it by default, valid range: 0 ~ 0.40
default values: 0
Percent of this memory will be allocated as HugePage, and reserved for PostgreSQL.
It should be slightly larger than pg_shmem_ratio
, if not zero.
name: node_tune
, type: enum
, level: C
node tuned profile: none,oltp,olap,crit,tiny
default values: oltp
-
tiny
: Micro Virtual Machine (1 ~ 3 Core, 1 ~ 8 GB Mem) -
oltp
: Regular OLTP templates with optimized latency -
olap
: Regular OLAP templates to optimize throughput -
crit
: Core financial business templates, optimizing the number of dirty pages
Usually, the database tuning template pg_conf
should be paired with the node tuning template: node_tune
name: node_sysctl_params
, type: dict
, level: C
sysctl parameters in k:v format in addition to tuned
default values: {}
Dictionary K-V structure, Key is kernel sysctl
parameter name, Value is the parameter value.
You can also define sysctl parameters with tuned profile
This section is about admin users and their credentials.
node_data: /data # node main data directory, `/data` by default
node_admin_enabled: true # create a admin user on target node?
node_admin_uid: 88 # uid and gid for node admin user
node_admin_username: dba # name of node admin user, `dba` by default
node_admin_ssh_exchange: true # exchange admin ssh key among node cluster
node_admin_pk_current: true # add current user's ssh pk to admin authorized_keys
node_admin_pk_list: [] # ssh public keys to be added to admin user
name: node_data
, type: path
, level: C
node main data directory, /data
by default
default values: /data
If specified, this path will be used as the major data disk mountpoint. The dir will be created, with a warning thrown if the path does not exist.
The data dir is owned by root with mode 0777
.
name: node_admin_enabled
, type: bool
, level: C
create a admin user on target node?
default value is true
Create an admin user on each node (password-free sudo and ssh), an admin user named dba (uid=88)
will be created by default,
which can access other nodes in the env and perform sudo from the meta node via SSH password-free.
name: node_admin_uid
, type: int
, level: C
uid and gid for node admin user
default values: 88
name: node_admin_username
, type: username
, level: C
name of node admin user, dba
by default
default values: dba
name: node_admin_ssh_exchange
, type: bool
, level: C
exchange admin ssh key among node cluster
default value is true
When enabled, Pigsty will exchange SSH public keys between members during playbook execution, allowing the admin user node_admin_username
to access other nodes within the cluster.
name: node_admin_pk_current
, type: bool
, level: C
add current user's ssh pk to admin authorized_keys
default value is true
When enabled, on the current node, the SSH public key (~/.ssh/id_rsa.pub
) of the current user is copied to the authorized_keys
of the target node admin user.
When deploying in a production env, be sure to pay attention to this parameter, which installs the default public key of the user currently executing the command to the admin user of all machines.
name: node_admin_pk_list
, type: string[]
, level: C
ssh public keys to be added to admin user
default values: []
Each element of the array is a string containing the key written to the admin user ~/.ssh/authorized_keys
, and the user with the corresponding private key can log in as an admin user.
When deploying in production envs, be sure to note this parameter and add only trusted keys to this list.
node_timezone: '' # setup node timezone, empty string to skip
node_ntp_enabled: true # enable chronyd time sync service?
node_ntp_servers: # ntp servers in `/etc/chrony.conf`
- pool pool.ntp.org iburst
node_crontab_overwrite: true # overwrite or append to `/etc/crontab`?
node_crontab: [ ] # crontab entries in `/etc/crontab`
name: node_timezone
, type: string
, level: C
setup node timezone, empty string to skip
default value is empty string, which will not change the default timezone (usually UTC)
name: node_ntp_enabled
, type: bool
, level: C
enable chronyd time sync service?
default value is true
, and thus Pigsty will override the node's /etc/chrony.conf
with records in node_ntp_servers
.
If you already have an NTP server configured, just set this to false
to leave it be.
name: node_ntp_servers
, type: string[]
, level: C
ntp servers in /etc/chrony.conf
default value: ["pool pool.ntp.org iburst"]
It only takes effect if node_ntp_enabled
is true.
You can use ${meta_ip}
to sync time with infra node ntp server.
name: node_crontab_overwrite
, type: bool
, level: C
overwrite or append to /etc/crontab
?
default value is true
, and pigsty will render records in node_crontab
in overwrite mode rather than appending to it.
name: node_crontab
, type: string[]
, level: C
crontab entries in /etc/crontab
default values: []
HAProxy is installed on every node by default, exposing services in a NodePort manner.
haproxy_enabled: true # enable haproxy on this node?
haproxy_clean: false # cleanup all existing haproxy config?
haproxy_reload: true # reload haproxy after config?
haproxy_auth_enabled: true # enable authentication for haproxy admin page
haproxy_admin_username: admin # haproxy admin username, `admin` by default
haproxy_admin_password: pigsty # haproxy admin password, `pigsty` by default
haproxy_exporter_port: 9101 # haproxy admin/exporter port, 9101 by default
haproxy_client_timeout: 24h # client side connection timeout, 24h by default
haproxy_server_timeout: 24h # server side connection timeout, 24h by default
haproxy_services: [] # list of haproxy service to be exposed on node
name: haproxy_enabled
, type: bool
, level: C
enable haproxy on this node?
default value is true
name: haproxy_clean
, type: bool
, level: G/C/A
cleanup all existing haproxy config?
default value is false
name: haproxy_reload
, type: bool
, level: A
reload haproxy after config?
default value is true
, it will reload haproxy after config change.
If you wish to check before apply, you can turn off this with cli args and check it.
name: haproxy_auth_enabled
, type: bool
, level: G
enable authentication for haproxy admin page
default value is true
, which will require a http basic auth for admin page.
Disabling it is not recommended, since your traffic control would be exposed.
name: haproxy_admin_username
, type: username
, level: G
haproxy admin username, admin
by default
default values: admin
name: haproxy_admin_password
, type: password
, level: G
haproxy admin password, pigsty
by default
default values: pigsty
name: haproxy_exporter_port
, type: port
, level: C
haproxy admin/exporter port, 9101 by default
default values: 9101
name: haproxy_client_timeout
, type: interval
, level: C
client side connection timeout, 24h by default
default values: 24h
name: haproxy_server_timeout
, type: interval
, level: C
server side connection timeout, 24h by default
default values: 24h
name: haproxy_services
, type: service[]
, level: C
list of haproxy service to be exposed on node
default values: []
, each element is a service definition, here is an ad hoc haproxy service example:
haproxy_services: # list of haproxy service
# expose pg-test read only replicas
- name: pg-test-ro # [REQUIRED] service name, unique
port: 5440 # [REQUIRED] service port, unique
ip: "*" # [OPTIONAL] service listen addr, "*" by default
protocol: tcp # [OPTIONAL] service protocol, 'tcp' by default
balance: leastconn # [OPTIONAL] load balance algorithm, roundrobin by default (or leastconn)
maxconn: 20000 # [OPTIONAL] max allowed front-end connection, 20000 by default
default: 'inter 3s fastinter 1s downinter 5s rise 3 fall 3 on-marked-down shutdown-sessions slowstart 30s maxconn 3000 maxqueue 128 weight 100'
options:
- option httpchk
- option http-keep-alive
- http-check send meth OPTIONS uri /read-only
- http-check expect status 200
servers:
- { name: pg-test-1 ,ip: 10.10.10.11 , port: 5432 , options: check port 8008 , backup: true }
- { name: pg-test-2 ,ip: 10.10.10.12 , port: 5432 , options: check port 8008 }
- { name: pg-test-3 ,ip: 10.10.10.13 , port: 5432 , options: check port 8008 }
It will be rendered to /etc/haproxy/<service.name>.cfg
and take effect after reload.
Pigsty install docker on infra nodes by default, which gives you the ability to run stateless software programs easily.
docker_enabled: false # enable docker on this node?
docker_cgroups_driver: systemd # docker cgroup fs driver: cgroupfs,systemd
docker_registry_mirrors: [] # docker registry mirror list
docker_image_cache: /tmp/docker # docker image cache dir, `/tmp/docker` by default
name: docker_enabled
, type: bool
, level: C
enable docker on this node?
default value is false
name: docker_cgroups_driver
, type: enum
, level: C
docker cgroup fs driver: cgroupfs,systemd
default values: systemd
name: docker_registry_mirrors
, type: string[]
, level: C
docker registry mirror list
default values: []
name: docker_image_cache
, type: path
, level: C
docker image cache dir, /tmp/docker
by default
default values: /tmp/docker
The local image cache in that dir with .tgz
suffix will be loaded into docker one by one with:
cat {{ docker_image_cache }}/*.tgz | gzip -d -c - | docker load
node_exporter_enabled: true # setup node_exporter on this node?
node_exporter_port: 9100 # node exporter listen port, 9100 by default
node_exporter_options: '--no-collector.softnet --no-collector.nvme --collector.ntp --collector.tcpstat --collector.processes'
name: node_exporter_enabled
, type: bool
, level: C
setup node_exporter on this node?
default value is true
name: node_exporter_port
, type: port
, level: C
node exporter listen port, 9100 by default
default values: 9100
name: node_exporter_options
, type: arg
, level: C
extra server options for node_exporter
default value: --no-collector.softnet --no-collector.nvme --collector.ntp --collector.tcpstat --collector.processes
Pigsty enables ntp
, tcpstat
, processes
three extra metrics, collectors, by default, and disables softnet
, nvme
metrics collectors by default.
Promtail will collect logs from other modules, and send them to LOKI
- INFRA: Infra logs, collected only on meta nodes.
  - nginx-access: /var/log/nginx/access.log
  - nginx-error: /var/log/nginx/error.log
  - grafana: /var/log/grafana/grafana.log
- NODES: Host node logs, collected on all nodes.
  - syslog: /var/log/messages
  - dmesg: /var/log/dmesg
  - cron: /var/log/cron
- PGSQL: PostgreSQL logs, collected when a node is defined with pg_cluster.
  - postgres: /pg/log/postgres/*.csv
  - patroni: /pg/log/patroni.log
  - pgbouncer: /pg/log/pgbouncer/pgbouncer.log
  - pgbackrest: /pg/log/pgbackrest/*.log
- REDIS: Redis logs, collected when a node is defined with redis_cluster.
  - redis: /var/log/redis/*.log

Log directories are customizable according to pg_log_dir, patroni_log_dir, pgbouncer_log_dir, pgbackrest_log_dir
promtail_enabled: true # enable promtail logging collector?
promtail_clean: false # purge existing promtail status file during init?
promtail_port: 9080 # promtail listen port, 9080 by default
promtail_positions: /var/log/positions.yaml # promtail position status file path
name: promtail_enabled
, type: bool
, level: C
enable promtail logging collector?
default value is true
name: promtail_clean
, type: bool
, level: G/A
purge existing promtail status file during init?
default value is false
, if you choose to clean, Pigsty will remove the existing state file defined by promtail_positions
which means that Promtail will recollect all logs on the current node and send them to Loki again.
name: promtail_port
, type: port
, level: C
promtail listen port, 9080 by default
default values: 9080
name: promtail_positions
, type: path
, level: C
promtail position status file path
default values: /var/log/positions.yaml
Promtail records the consumption offsets of all logs, which are periodically written to the file specified by promtail_positions
.
Distributed Configuration Store (DCS) is a distributed, highly available meta-database that provides HA consensus and service discovery.
Pigsty use etcd
as DCS. ETCD availability is critical for postgres HA. Special care needs to be taken when using the DCS service in a production env.
Availability of ETCD itself is achieved through multiple peers. For example, a 3-node ETCD cluster allows up to one node to fail, while a 5-node ETCD cluster allows 2 nodes to fail.
In a large-scale production env, it is recommended to use at least 3~5 ETCD Servers.
#etcd_seq: 1 # etcd instance identifier, explicitly required
#etcd_cluster: etcd # etcd cluster & group name, etcd by default
etcd_safeguard: false # prevent purging running etcd instance?
etcd_clean: true # purging existing etcd during initialization?
etcd_data: /data/etcd # etcd data directory, /data/etcd by default
etcd_port: 2379 # etcd client port, 2379 by default
etcd_peer_port: 2380 # etcd peer port, 2380 by default
etcd_init: new # etcd initial cluster state, new or existing
etcd_election_timeout: 1000 # etcd election timeout, 1000ms by default
etcd_heartbeat_interval: 100 # etcd heartbeat interval, 100ms by default
name: etcd_seq
, type: int
, level: I
etcd instance identifier, REQUIRED
no default value, you have to specify it explicitly.
name: etcd_cluster
, type: string
, level: C
etcd cluster & group name, etcd by default
default values: etcd
, which is a fixed group name
name: etcd_safeguard
, type: bool
, level: G/C/A
prevent purging running etcd instance?
default value is false
Assure that any running etcd instance will not be purged by init / remove playbooks.
name: etcd_clean
, type: bool
, level: G/C/A
purging existing etcd during initialization?
default value is true
, which will try to purge existing etcd instance during init, which makes etcd.yml
a truly idempotent playbook.
But if etcd_safeguard
is enabled, it will still abort on any running etcd instance.
name: etcd_data
, type: path
, level: C
etcd data directory, /data/etcd by default
default values: /data/etcd
name: etcd_port
, type: port
, level: C
etcd client port, 2379 by default
default values: 2379
name: etcd_peer_port
, type: port
, level: C
etcd peer port, 2380 by default
default values: 2380
name: etcd_init
, type: enum
, level: C
etcd initial cluster state, new or existing
default values: new
existing
is used when trying to add new node to existing etcd cluster.
name: etcd_election_timeout
, type: int
, level: C
etcd election timeout, 1000ms by default
default values: 1000
name: etcd_heartbeat_interval
, type: int
, level: C
etcd heartbeat interval, 100ms by default
default values: 100
Minio is a S3 compatible object storage service. Which is used as an optional central backup storage repo for PostgreSQL.
But you can use it for other purpose such as store large files, document, pictures & videos.
#minio_seq: 1 # minio instance identifier, REQUIRED
minio_cluster: minio # minio cluster name, minio by default
minio_clean: false # cleanup minio during init?, false by default
minio_user: minio # minio os user, `minio` by default
minio_node: '${minio_cluster}-${minio_seq}.pigsty' # minio node name pattern
minio_data: '/data/minio' # minio data dir(s), use {x...y} to specify multi drivers
minio_domain: sss.pigsty # minio external domain name, `sss.pigsty` by default
minio_port: 9000 # minio service port, 9000 by default
minio_admin_port: 9001 # minio console port, 9001 by default
minio_access_key: minioadmin # root access key, `minioadmin` by default
minio_secret_key: minioadmin # root secret key, `minioadmin` by default
minio_extra_vars: '' # extra environment variables
minio_alias: sss # alias name for local minio deployment
minio_buckets: [ { name: pgsql }, { name: infra }, { name: redis } ]
minio_users:
- { access_key: dba , secret_key: S3User.DBA, policy: consoleAdmin }
- { access_key: pgbackrest , secret_key: S3User.Backup, policy: readwrite }
name: minio_seq
, type: int
, level: I
minio instance identifier, REQUIRED identity parameters
no default value
name: minio_cluster
, type: string
, level: C
minio cluster name, minio by default
default values: minio
name: minio_clean
, type: bool
, level: G/C/A
cleanup minio during init?, false by default
default value is false
name: minio_user
, type: username
, level: C
minio os user, minio
by default
default values: minio
name: minio_node
, type: string
, level: C
minio node name pattern
default values: ${minio_cluster}-${minio_seq}.pigsty
name: minio_data
, type: path
, level: C
minio data dir(s), use {x...y} to specify multi drivers
default values: /data/minio
name: minio_domain
, type: string
, level: G
minio external domain name, sss.pigsty
by default
default values: sss.pigsty
name: minio_port
, type: port
, level: C
minio service port, 9000 by default
default values: 9000
name: minio_admin_port
, type: port
, level: C
minio console port, 9001 by default
default values: 9001
name: minio_access_key
, type: username
, level: C
root access key, minioadmin
by default
default values: minioadmin
name: minio_secret_key
, type: password
, level: C
root secret key, minioadmin
by default
default values: minioadmin
name: minio_extra_vars
, type: string
, level: C
extra environment variables for minio server
default value is empty string; you can use a multiline string to pass multiple environment variables
name: minio_alias
, type: string
, level: G
alias name for local minio deployment
default values: sss
name: minio_buckets
, type: bucket[]
, level: C
list of minio bucket to be created
default value:
minio_buckets: [ { name: pgsql }, { name: infra }, { name: redis } ]
3 default buckets are created for module PGSQL
, INFRA
, and REDIS
name: minio_users
, type: user[]
, level: C
list of minio user to be created
default value:
minio_users:
- { access_key: dba , secret_key: S3User.DBA, policy: consoleAdmin }
- { access_key: pgbackrest , secret_key: S3User.Backup, policy: readwrite }
Two default users are created for PostgreSQL backup usage.
# pg_cluster: #CLUSTER # pgsql cluster name, required identity parameter
# pg_seq: 0 #INSTANCE # pgsql instance seq number, required identity parameter
# pg_role: replica #INSTANCE # pgsql role, required, could be primary,replica,offline
# pg_instances: {} #INSTANCE # define multiple pg instances on node in `{port:ins_vars}` format
# pg_upstream: #INSTANCE # repl upstream ip addr for standby cluster or cascade replica
# pg_shard: #CLUSTER # pgsql shard name, optional identity for sharding clusters
# pg_group: 0 #CLUSTER # pgsql shard index number, optional identity for sharding clusters
# gp_role: master #CLUSTER # greenplum role of this cluster, could be master or segment
pg_offline_query: false #INSTANCE # set to true to enable offline query on this instance
pg_weight: 100 #INSTANCE # relative load balance weight in service, 100 by default, 0-255
There are some identity parameters that require explicit allocation.
Name | Type | Level | Description |
---|---|---|---|
pg_cluster |
string |
C | PG database cluster name |
pg_seq |
number |
I | PG database instance id |
pg_role |
enum |
I | PG database instance role |
pg_shard |
string |
C | PG database shard name of cluster |
pg_group |
number |
C | PG database shard index of cluster |
-
pg_cluster
: It identifies the name of the cluster, which is configured at the cluster level. -
pg_role
: Configured at the instance level, identifies the role of the instance. Only the primary
role will be handled specially. If not filled in, the default is the replica
role; there are also the special delayed
and offline
roles. -
pg_seq
: Used to identify the ins within the cluster, usually with an integer number incremented from 0 or 1, which is not changed once it is assigned. -
{{ pg_cluster }}-{{ pg_seq }}
is used to uniquely identify the instance, i.e. pg_instance
. -
{{ pg_cluster }}-{{ pg_role }}
is used to identify the services within the cluster, i.e. pg_service
. -
pg_shard
and pg_group
are used for horizontally sharding clusters, reserved for citus, greenplum, and matrixdb.
pg_cluster
, pg_role
, pg_seq
are core identity params, which are required for any Postgres cluster, and must be explicitly specified. Here's an example:
pg-test:
hosts:
10.10.10.11: {pg_seq: 1, pg_role: replica}
10.10.10.12: {pg_seq: 2, pg_role: primary}
10.10.10.13: {pg_seq: 3, pg_role: replica}
vars:
pg_cluster: pg-test
All other params can be inherited from the global config or the default config, but the identity params must be explicitly specified and manually assigned. The current PGSQL identity params are as follows:
name: pg_mode
, type: enum
, level: C
pgsql cluster mode, could be pgsql
, citus
, or gpsql
, pgsql
by default.
If pg_mode
is set to citus
or gpsql
, pg_shard
and pg_group
will be required for horizontal sharding clusters.
name: pg_cluster
, type: string
, level: C
pgsql cluster name, REQUIRED identity parameter
The cluster name will be used as the namespace for postgres related resources within that cluster.
The naming needs to follow a specific naming pattern: [a-z][a-z0-9-]*
to be compatible with the requirements of different constraints on the identity.
name: pg_seq
, type: int
, level: I
pgsql instance seq number, REQUIRED identity parameter
A serial number of this instance, unique within its cluster, starting from 0 or 1.
name: pg_role
, type: enum
, level: I
pgsql role, REQUIRED, could be primary,replica,offline
Roles for PGSQL instance, can be: primary
, replica
, standby
or offline
.
-
primary
: Primary, there is one and only one primary in a cluster. -
replica
: Replica for carrying online read-only traffic, there may be a slight replication delay though (10ms~100ms, 100KB). -
standby
: Special replica that is always synced with primary, there's no replication delay & data loss on this replica. -
offline
: Offline replica for taking on offline read-only traffic, such as statistical analysis/ETL/personal queries, etc.
Identity params, required params, and instance-level params.
name: pg_instances
, type: dict
, level: I
define multiple pg instances on node in {port:ins_vars}
format
Reserved for single-node multi instance deployment
name: pg_upstream
, type: ip
, level: I
repl upstream ip addr for standby cluster or cascade replica
Setting pg_upstream
on a primary
instance indicates that this cluster is a Standby Cluster, and it will receive changes from the upstream instance; thus the primary
is actually a standby leader
.
Setting pg_upstream
for a non-primary instance will explicitly set a replication upstream instance, if it is different from the primary ip addr, this instance will become a cascade replica.
name: pg_shard
, type: string
, level: C
pgsql shard name, optional identity for sharding clusters
When multiple pgsql clusters serve the same business together in a horizontally sharded style, Pigsty will mark this group of clusters as a Sharding Group.
pg_shard
is the name of the shard group name. It's usually the prefix of pg_cluster
.
sharding group: pg-test
cls sindex = 1: pg-testshard1
cls sindex = 2: pg-testshard2
cls sindex = 3: pg-testshard3
cls sindex = 4: pg-testshard4
name: pg_group
, type: int
, level: C
pgsql shard index number, optional identity for sharding clusters
Sharding cluster index of sharding group, used in pair with pg_shard.
name: gp_role
, type: enum
, level: C
greenplum role of this cluster, could be master or segment
default values: master
, mark a postgres cluster as greenplum master.
segment
will mark a postgres cluster as greenplum segment
name: pg_exporters
, type: dict
, level: C
additional pg_exporters to monitor remote postgres instances
default values: {}
pg_exporters: # list all remote instances here, alloc a unique unused local port as k
20001: { pg_cluster: pg-foo, pg_seq: 1, pg_host: 10.10.10.10 }
20004: { pg_cluster: pg-foo, pg_seq: 2, pg_host: 10.10.10.11 }
20002: { pg_cluster: pg-bar, pg_seq: 1, pg_host: 10.10.10.12 }
20003: { pg_cluster: pg-bar, pg_seq: 1, pg_host: 10.10.10.13 }
If you wish to monitor remote postgres instances, define them in pg_exporters
and load them with pgsql-monitor.yml
playbook.
Check PGSQL Monitoring for details.
name: pg_offline_query
, type: bool
, level: G
set to true to enable offline query on this instance
default value is false
When set to true
, the user group dbrole_offline
can connect to the instance and perform offline queries, regardless of the role of the current instance, just like an offline
instance.
If you just have one replica or even one primary in your postgres cluster, adding this could mark it for accepting ETL, slow queries with interactive access.
name: pg_weight
, type: int
, level: G
relative load balance weight in service, 100 by default, 0-255
default values: 100
You have to reload Service to take effect.
Database credentials and in-database objects that need to be taken care of by users.
# postgres business object definition, overwrite in group vars
pg_users: [] # postgres business users
pg_databases: [] # postgres business databases
pg_services: [] # postgres business services
pg_hba_rules: [] # business hba rules for postgres
pgb_hba_rules: [] # business hba rules for pgbouncer
# global credentials, overwrite in global vars
pg_replication_username: replicator
pg_replication_password: DBUser.Replicator
pg_admin_username: dbuser_dba
pg_admin_password: DBUser.DBA
pg_monitor_username: dbuser_monitor
pg_monitor_password: DBUser.Monitor
name: pg_users
, type: user[]
, level: C
postgres business users, has to be defined at cluster level.
default values: []
, each object in the array defines a User/Role. Examples:
pg_users: # define business users/roles on this cluster, array of user definition
- name: dbuser_meta # REQUIRED, `name` is the only mandatory field of a user definition
password: DBUser.Meta # optional, password, can be a scram-sha-256 hash string or plain text
login: true # optional, can log in, true by default (new biz ROLE should be false)
superuser: false # optional, is superuser? false by default
createdb: false # optional, can create database? false by default
createrole: false # optional, can create role? false by default
inherit: true # optional, can this role use inherited privileges? true by default
replication: false # optional, can this role do replication? false by default
bypassrls: false # optional, can this role bypass row level security? false by default
pgbouncer: true # optional, add this user to pgbouncer user-list? false by default (production user should be true explicitly)
connlimit: -1 # optional, user connection limit, default -1 disable limit
expire_in: 3650 # optional, now + n days when this role is expired (OVERWRITE expire_at)
expire_at: '2030-12-31' # optional, YYYY-MM-DD 'timestamp' when this role is expired (OVERWRITTEN by expire_in)
comment: pigsty admin user # optional, comment string for this user/role
roles: [dbrole_admin] # optional, belonged roles. default roles are: dbrole_{admin,readonly,readwrite,offline}
parameters: {} # optional, role level parameters with `ALTER ROLE SET`
pool_mode: transaction # optional, pgbouncer pool mode at user level, transaction by default
pool_connlimit: -1 # optional, max database connections at user level, default -1 disable limit
search_path: public # key value config parameters according to postgresql documentation (e.g: use pigsty as default search_path)
- {name: dbuser_view ,password: DBUser.Viewer ,pgbouncer: true ,roles: [dbrole_readonly], comment: read-only viewer for meta database}
- {name: dbuser_grafana ,password: DBUser.Grafana ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for grafana database }
- {name: dbuser_bytebase ,password: DBUser.Bytebase ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for bytebase database }
- {name: dbuser_kong ,password: DBUser.Kong ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for kong api gateway }
- {name: dbuser_gitea ,password: DBUser.Gitea ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for gitea service }
- {name: dbuser_wiki ,password: DBUser.Wiki ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for wiki.js service }
- Each user or role must specify a
name
and the rest of the fields are optional; a name
must be unique in this list. -
password
is optional, if left blank then no password is set, you can use the MD5 ciphertext password. -
login
,superuser
,createdb
,createrole
,inherit
,replication
andbypassrls
are all boolean types used to set user attributes. If not set, the system defaults are used. - Users are created by
CREATE USER
, so they have the login
attribute by default. If you are creating a role instead, you need to specify login: false
. -
expire_at
and expire_in
are used to control the user expiration time. expire_at
uses a date timestamp in the shape of YYYY-mm-DD
. expire_in
uses the number of days to expire from now, and overrides the expire_at
option if expire_in
exists. - New users are not added to the Pgbouncer user list by default, and
pgbouncer: true
must be explicitly defined for the user to be added to the Pgbouncer user list. - Users/roles are created sequentially, and users defined later can belong to the roles defined earlier.
-
pool_mode
,pool_connlimit
are user-level pgbouncer parameters that will override default settings. - Users can use pre-defined pg_default_roles with
roles
field:-
dbrole_readonly
: Default production read-only user with global read-only privileges. (Read-only production access) -
dbrole_offline
: Default offline read-only user with read-only access on a specific ins. (offline query, personal account, ETL) -
dbrole_readwrite
: Default production read/write user with global CRUD privileges. (Regular production use) -
dbrole_admin
: Default production management user with the privilege to execute DDL changes. (Admin User)
-
Configure pgbouncer: true
for the production account to add the user to pgbouncer; It's important to use a connection pool if you got thousands of clients.
name: pg_databases
, type: database[]
, level: C
postgres business databases, has to be defined at cluster level.
default values: []
, each object in the array defines a Database. Examples:
pg_databases: # define business databases on this cluster, array of database definition
- name: meta # REQUIRED, `name` is the only mandatory field of a database definition
baseline: cmdb.sql # optional, database sql baseline path, (relative path among ansible search path, e.g files/)
pgbouncer: true # optional, add this database to pgbouncer database list? true by default
schemas: [pigsty] # optional, additional schemas to be created, array of schema names
extensions: [{name: postgis}] # optional, additional extensions to be installed: array of `{name[,schema]}`
comment: pigsty meta database # optional, comment string for this database
owner: postgres # optional, database owner, postgres by default
template: template1 # optional, which template to use, template1 by default
encoding: UTF8 # optional, database encoding, UTF8 by default. (MUST same as template database)
locale: C # optional, database locale, C by default. (MUST same as template database)
lc_collate: C # optional, database collate, C by default. (MUST same as template database)
lc_ctype: C # optional, database ctype, C by default. (MUST same as template database)
tablespace: pg_default # optional, default tablespace, 'pg_default' by default.
allowconn: true # optional, allow connection, true by default. false will disable connect at all
revokeconn: false # optional, revoke public connection privilege. false by default. (leave connect with grant option to owner)
register_datasource: true # optional, register this database to grafana datasources? true by default
connlimit: -1 # optional, database connection limit, default -1 disable limit
pool_auth_user: dbuser_meta # optional, all connection to this pgbouncer database will be authenticated by this user
pool_mode: transaction # optional, pgbouncer pool mode at database level, default transaction
pool_size: 64 # optional, pgbouncer pool size at database level, default 64
pool_size_reserve: 32 # optional, pgbouncer pool size reserve at database level, default 32
pool_size_min: 0 # optional, pgbouncer pool size min at database level, default 0
pool_max_db_conn: 100 # optional, max database connections at database level, default 100
- { name: grafana ,owner: dbuser_grafana ,revokeconn: true ,comment: grafana primary database }
- { name: bytebase ,owner: dbuser_bytebase ,revokeconn: true ,comment: bytebase primary database }
- { name: kong ,owner: dbuser_kong ,revokeconn: true ,comment: kong the api gateway database }
- { name: gitea ,owner: dbuser_gitea ,revokeconn: true ,comment: gitea meta database }
- { name: wiki ,owner: dbuser_wiki ,revokeconn: true ,comment: wiki meta database }
In each database definition, the DB name
is mandatory and the rest are optional.
name: pg_services
, type: service[]
, level: C
postgres business services exposed via haproxy, has to be defined at cluster level.
You can define ad hoc services with pg_services
in addition to the default pg_default_services
default values: []
, each object in the array defines a Service. Examples:
pg_services: # extra services in addition to pg_default_services, array of service definition
- name: standby # required, service name, the actual svc name will be prefixed with `pg_cluster`, e.g: pg-meta-standby
port: 5435 # required, service exposed port (work as kubernetes service node port mode)
ip: "*" # optional, service bind ip address, `*` for all ip by default
selector: "[]" # required, service member selector, use JMESPath to filter inventory
dest: pgbouncer # optional, destination port, postgres|pgbouncer|<port_number> , pgbouncer(6432) by default
check: /sync # optional, health check url path, / by default
backup: "[? pg_role == `primary`]" # backup server selector
maxconn: 3000 # optional, max allowed front-end connection
balance: roundrobin # optional, haproxy load balance algorithm (roundrobin by default, other: leastconn)
options: 'inter 3s fastinter 1s downinter 5s rise 3 fall 3 on-marked-down shutdown-sessions slowstart 30s maxconn 3000 maxqueue 128 weight 100'
name: pg_hba_rules
, type: hba[]
, level: C
business hba rules for postgres
default values: []
, each object in array is an HBA Rule definition:
Which are array of hba object, each hba object may look like
# RAW HBA RULES
- title: allow intranet password access
role: common
rules:
- host all all 10.0.0.0/8 md5
- host all all 172.16.0.0/12 md5
- host all all 192.168.0.0/16 md5
-
title
: Rule Title, transform into comment in hba file -
rules
: Array of strings, each string is a raw hba rule record -
role
: Applied roles, where to install these hba rules-
common
: apply for all instances -
primary
,replica
,standby
,offline
: apply on corresponding instances with that pg_role
. - special case: HBA rule with
role == 'offline'
will be installed on instances with the pg_offline_query
flag
-
or you can use another alias form
- addr: 'intra' # world|intra|infra|admin|local|localhost|cluster|<cidr>
auth: 'pwd' # trust|pwd|ssl|cert|deny|<official auth method>
user: 'all' # all|${dbsu}|${repl}|${admin}|${monitor}|<user>|<group>
db: 'all' # all|replication|....
rules: [] # raw hba string precedence over above all
title: allow intranet password access
pg_default_hba_rules
is similar to this, but is used for global HBA rule settings
name: pgb_hba_rules
, type: hba[]
, level: C
business hba rules for pgbouncer
default values: []
Similar to pg_hba_rules
, array of hba rule object, except this is for pgbouncer.
name: pg_replication_username
, type: username
, level: G
postgres replication username, replicator
by default
default values: replicator
, This parameter is globally used.
name: pg_replication_password
, type: password
, level: G
postgres replication password, DBUser.Replicator
by default
default values: DBUser.Replicator
WARNING: CHANGE THIS IN PRODUCTION ENVIRONMENT!!!!
name: pg_admin_username
, type: username
, level: G
postgres admin username, dbuser_dba
by default
default values: dbuser_dba
, which is a global postgres superuser.
name: pg_admin_password
, type: password
, level: G
postgres admin password in plain text, DBUser.DBA
by default
default values: DBUser.DBA
WARNING: CHANGE THIS IN PRODUCTION ENVIRONMENT!!!!
name: pg_monitor_username
, type: username
, level: G
postgres monitor username, dbuser_monitor
by default
default values: dbuser_monitor
, which is a global monitoring user.
name: pg_monitor_password
, type: password
, level: G
postgres monitor password, DBUser.Monitor
by default
default values: DBUser.Monitor
WARNING: CHANGE THIS IN PRODUCTION ENVIRONMENT!!!!
PG Install is responsible for installing PostgreSQL & Extensions.
If you wish to install a different major version, just make sure repo packages exists and overwrite pg_version
on cluster level.
pg_dbsu: postgres # os dbsu name, postgres by default, better not change it
pg_dbsu_uid: 26 # os dbsu uid and gid, 26 for default postgres users and groups
pg_dbsu_sudo: limit # dbsu sudo privilege, none,limit,all,nopass. limit by default
pg_dbsu_home: /var/lib/pgsql # postgresql home directory, `/var/lib/pgsql` by default
pg_dbsu_ssh_exchange: true # exchange postgres dbsu ssh key among same pgsql cluster
pg_version: 15 # postgres major version to be installed, 15 by default
pg_bin_dir: /usr/pgsql/bin # postgres binary dir, `/usr/pgsql/bin` by default
pg_log_dir: /pg/log/postgres # postgres log dir, `/pg/log/postgres` by default
pg_packages: # pg packages to be installed, `${pg_version}` will be replaced
- postgresql${pg_version}*
- pgbouncer pg_exporter pgbadger vip-manager patroni patroni-etcd pgbackrest
pg_extensions: # pg extensions to be installed, `${pg_version}` will be replaced
- postgis33_${pg_version}* pg_repack_${pg_version} wal2json_${pg_version} timescaledb-2-postgresql-${pg_version}
name: pg_dbsu
, type: username
, level: C
os dbsu name, postgres by default, better not change it
default values: postgres
When installing Greenplum / MatrixDB, modify this parameter to the corresponding recommended value: gpadmin|mxadmin
.
name: pg_dbsu_uid
, type: int
, level: C
os dbsu uid and gid, 26 for default postgres users and groups
default values: 26
, which is consistent with the official pgdg RPM.
name: pg_dbsu_sudo
, type: enum
, level: C
dbsu sudo privilege, none,limit,all,nopass. limit by default
default values: limit
, which only allow sudo systemctl <start|stop|reload> <postgres|patroni|pgbouncer|...>
-
none
: No Sudo privilege -
limit
: Limited sudo privilege to execute systemctl commands for database-related components, default. -
all
: Full sudo
privilege, password required. -
nopass
: Full sudo
privileges without a password (not recommended).
name: pg_dbsu_home
, type: path
, level: C
postgresql home directory, /var/lib/pgsql
by default
default values: /var/lib/pgsql
name: pg_dbsu_ssh_exchange
, type: bool
, level: C
exchange postgres dbsu ssh key among same pgsql cluster
default value is true
, means the dbsu can ssh to each other among same cluster.
name: pg_version
, type: enum
, level: C
postgres major version to be installed, 15 by default
default values: 15
Note that PostgreSQL physical stream replication cannot cross major versions, do not configure this on instance level.
name: pg_bin_dir
, type: path
, level: C
postgres binary dir, /usr/pgsql/bin
by default
default values: /usr/pgsql/bin
The default value is a soft link created manually during the installation process, pointing to the specific Postgres version dir installed.
For example /usr/pgsql -> /usr/pgsql-14
. For more details, check PGSQL File Structure for details.
name: pg_log_dir
, type: path
, level: C
postgres log dir, /pg/log/postgres
by default
default values: /pg/log/postgres
caveat: if
pg_log_dir
is prefixed with pg_data
it will not be created explicitly (it will be created by postgres itself then).
name: pg_packages
, type: string[]
, level: C
pg packages to be installed, ${pg_version}
will be replaced to pg_version
default value:
pg_packages: # pg packages to be installed, `${pg_version}` will be replaced
- postgresql${pg_version}*
- pgbouncer pg_exporter pgbadger vip-manager patroni patroni-etcd pgbackrest
name: pg_extensions
, type: string[]
, level: C
pg extensions to be installed, ${pg_version}
will be replaced to pg_version
default value:
pg_extensions: # pg extensions to be installed, `${pg_version}` will be replaced
- postgis33_${pg_version}* pg_repack_${pg_version} wal2json_${pg_version} timescaledb-2-postgresql-${pg_version}
Some extensions are not available, or have a different name in different EL release. for example:
citus_15 # EL7
citus111_15 # EL8, EL9
Bootstrap a postgres cluster with patroni, and setup pgbouncer connection pool along with it.
It also init cluster template databases with default roles, schemas & extensions & default privileges.
Then it will create business databases & users and add them to pgbouncer & monitoring system
On a machine with Postgres, create a set of databases.
pg_safeguard: false # prevent purging running postgres instance? false by default
pg_clean: true # purging existing postgres during pgsql init? true by default
pg_data: /pg/data # postgres data directory, `/pg/data` by default
pg_fs_main: /data # mountpoint/path for postgres main data, `/data` by default
pg_fs_bkup: /data/backups       # mountpoint/path for pg backup data, `/data/backups` by default
pg_storage_type: SSD # storage type for pg main data, SSD,HDD, SSD by default
pg_dummy_filesize: 64MiB # size of `/pg/dummy`, hold 64MB disk space for emergency use
pg_listen: '0.0.0.0'            # postgres listen address, `0.0.0.0` (all ipv4 addr) by default
pg_port: 5432 # postgres listen port, 5432 by default
pg_localhost: /var/run/postgresql # postgres unix socket dir for localhost connection
pg_namespace: /pg # top level key namespace in etcd, used by patroni & vip
patroni_enabled: true # if disabled, no postgres cluster will be created during init
patroni_mode: default # patroni working mode: default,pause,remove
patroni_port: 8008 # patroni listen port, 8008 by default
patroni_log_dir: /pg/log/patroni # patroni log dir, `/pg/log/patroni` by default
patroni_ssl_enabled: false # secure patroni RestAPI communications with SSL?
patroni_watchdog_mode: off # patroni watchdog mode: automatic,required,off. off by default
patroni_username: postgres # patroni restapi username, `postgres` by default
patroni_password: Patroni.API # patroni restapi password, `Patroni.API` by default
pg_conf: oltp.yml # config template: oltp,olap,crit,tiny. `oltp.yml` by default
pg_max_conn: auto # postgres max connections, `auto` will use recommended value
pg_shmem_ratio: 0.25 # postgres shared memory ratio, 0.25 by default, 0.1~0.4
pg_rto: 30 # recovery time objective in seconds, `30s` by default
pg_rpo: 1048576 # recovery point objective in bytes, `1MiB` at most by default
pg_libs: 'timescaledb, pg_stat_statements, auto_explain' # extensions to be loaded
pg_delay: 0 # replication apply delay for standby cluster leader
pg_checksum: false # enable data checksum for postgres cluster?
pg_pwd_enc: scram-sha-256 # passwords encryption algorithm: md5,scram-sha-256
pg_encoding: UTF8 # database cluster encoding, `UTF8` by default
pg_locale: C                    # database cluster locale, `C` by default
pg_lc_collate: C # database cluster collate, `C` by default
pg_lc_ctype: en_US.UTF8 # database character type, `en_US.UTF8` by default
pgbouncer_enabled: true # if disabled, pgbouncer will not be launched on pgsql host
pgbouncer_port: 6432 # pgbouncer listen port, 6432 by default
pgbouncer_log_dir: /pg/log/pgbouncer # pgbouncer log dir, `/pg/log/pgbouncer` by default
pgbouncer_auth_query: false # query postgres to retrieve unlisted business users?
pgbouncer_poolmode: transaction # pooling mode: transaction,session,statement, transaction by default
pgbouncer_sslmode: disable # pgbouncer client ssl mode, disable by default
name: pg_safeguard
, type: bool
, level: G/C/A
prevent purging running postgres instance? false by default
default value is false
, If enabled, pgsql.yml
& pgsql-rm.yml
will abort immediately if any postgres instance is running.
name: pg_clean
, type: bool
, level: G/C/A
purging existing postgres during pgsql init? true by default
default value is true
, it will purge existing postgres instance during pgsql.yml
init. which makes the playbook idempotent.
if set to false
, pgsql.yml
will abort if there's already a running postgres instance. and pgsql-rm.yml
will NOT remove postgres data (only stop the server).
name: pg_data
, type: path
, level: C
postgres data directory, /pg/data
by default
default values: /pg/data
, DO NOT CHANGE IT.
It's a soft link that point to underlying data directory.
Check PGSQL File Structure for details.
name: pg_fs_main
, type: path
, level: C
mountpoint/path for postgres main data, /data
by default
default values: /data
name: pg_fs_bkup
, type: path
, level: C
mountpoint/path for pg backup data, /data/backups
by default
default values: /data/backups
If you are using the default pgbackrest_method
= local
, It's wise to have a dedicated HDD (SSD is better of course) for backup storage.
The disk should be 3x larger than your database files to keep 3 full backups of retention; otherwise you may have to use the minio
repo or reduce the retention period.
You can use a normal dir instead of a dedicated disk, though it is not recommended.
name: pg_storage_type
, type: enum
, level: C
storage type for pg main data, SSD,HDD, SSD by default
default values: SSD
, it will affect some optimization parameters
name: pg_dummy_filesize
, type: size
, level: C
size of /pg/dummy
, hold 64MB disk space for emergency use
default values: 64MiB
A placeholder file, a pre-allocated empty file that takes up disk space.
When the disk is full, removing the placeholder file can free up some space for emergency use, it is recommended to use 4GiB
, and 8GiB
for production env.
name: pg_listen
, type: ip
, level: C
postgres listen address, 0.0.0.0
(all ipv4 addr) by default
default values: 0.0.0.0
, which is all IPv4 address, if you want to include all IPv6 address, use *
instead.
name: pg_port
, type: port
, level: C
postgres listen port, 5432 by default
default values: 5432
name: pg_localhost
, type: path
, level: C
postgres unix socket dir for localhost connection
default values: /var/run/postgresql
The Unix socket dir for PostgreSQL and Pgbouncer local connection, which is used by pg_exporter
and patroni.
name: pg_namespace
, type: path
, level: C
top level key namespace in etcd, used by patroni & vip
default values: /pg
name: patroni_enabled
, type: bool
, level: C
if disabled, no postgres cluster will be created during init
default value is true
If disabled, Pigsty will skip pulling up patroni. This option is used when setting up extra stuff for an existing instance.
name: patroni_mode
, type: enum
, level: C
patroni working mode: default,pause,remove
default values: default
-
default
: Bootstrap PostgreSQL cluster with Patroni -
pause
: Just like default
, but entering maintenance mode after bootstrap -
remove
: Init the cluster with Patroni, then remove Patroni and use raw PostgreSQL instead.
name: patroni_port
, type: port
, level: C
patroni listen port, 8008 by default
default values: 8008
The Patroni API server listens on this for health checking & unsafe API requests.
name: patroni_log_dir
, type: path
, level: C
patroni log dir, /pg/log/patroni
by default
default values: /pg/log/patroni
name: patroni_ssl_enabled
, type: bool
, level: G
secure patroni RestAPI communications with SSL?
default value is false
, This parameter can only be set before deployment
name: patroni_watchdog_mode
, type: string
, level: C
patroni watchdog mode: automatic,required,off. off by default
default value is off
-
off
: not using watchdog
. Avoid fencing the node. -
automatic
: Enable watchdog
if the kernel has softdog
enabled; not forced, default behavior. -
required
: Force watchdog
, or refuse to start if softdog
is not enabled on the system.
Enabling Watchdog means that the system prioritizes ensuring data consistency and drops availability. If availability is more important to your system, it is recommended to turn off Watchdog on the meta node.
name: patroni_username
, type: username
, level: C
patroni restapi username, postgres
by default
default values: postgres
name: patroni_password
, type: password
, level: C
patroni restapi password, Patroni.API
by default
default values: Patroni.API
name: patroni_citus_db
, type: string
, level: C
citus database managed by patroni, postgres
by default.
Patroni 3.0's native citus will specify a managed database for citus. which is created by patroni itself.
name: pg_conf
, type: enum
, level: C
config template: oltp,olap,crit,tiny. oltp.yml
by default
default values: oltp.yml
name: pg_max_conn
, type: int
, level: C
postgres max connections, auto
will use recommended value
default values: auto
name: pg_shmem_ratio
, type: float
, level: C
postgres shared memory ratio, 0.25 by default, 0.1~0.4
default values: 0.25
name: pg_rto
, type: int
, level: C
recovery time objective in seconds, 30s
by default
default values: 30
name: pg_rpo
, type: int
, level: C
recovery point objective in bytes, 1MiB
at most by default
default values: 1048576
name: pg_libs
, type: string
, level: C
preloaded libraries, pg_stat_statements,auto_explain
by default
default value: timescaledb, pg_stat_statements, auto_explain
name: pg_delay
, type: interval
, level: I
replication apply delay for standby cluster leader
default values: 0
name: pg_checksum
, type: bool
, level: C
enable data checksum for postgres cluster?
default value is false
name: pg_pwd_enc
, type: enum
, level: C
passwords encryption algorithm: md5,scram-sha-256
default values: scram-sha-256
name: pg_encoding
, type: enum
, level: C
database cluster encoding, UTF8
by default
default values: UTF8
name: pg_locale
, type: enum
, level: C
database cluster locale, C
by default
default values: C
name: pg_lc_collate
, type: enum
, level: C
database cluster collate, C
by default
default values: C
name: pg_lc_ctype
, type: enum
, level: C
database character type, en_US.UTF8
by default
default values: en_US.UTF8
name: pgbouncer_enabled
, type: bool
, level: C
if disabled, pgbouncer will not be launched on pgsql host
default value is true
name: pgbouncer_port
, type: port
, level: C
pgbouncer listen port, 6432 by default
default values: 6432
name: pgbouncer_log_dir
, type: path
, level: C
pgbouncer log dir, /pg/log/pgbouncer
by default
default values: /pg/log/pgbouncer
name: pgbouncer_auth_query
, type: bool
, level: C
query postgres to retrieve unlisted business users?
default value is false
name: pgbouncer_poolmode
, type: enum
, level: C
pooling mode: transaction,session,statement, transaction by default
default values: transaction
name: pgbouncer_sslmode
, type: enum
, level: C
pgbouncer client ssl mode, disable by default
default values: disable
name: pg_provision
, type: bool
, level: C
provision postgres cluster after bootstrap
default value is true
pg_provision: true # provision postgres cluster after bootstrap
pg_init: pg-init # provision init script for cluster template, `pg-init` by default
pg_default_roles: # default roles and users in postgres cluster
- { name: dbrole_readonly ,login: false ,comment: role for global read-only access }
- { name: dbrole_offline ,login: false ,comment: role for restricted read-only access }
- { name: dbrole_readwrite ,login: false ,roles: [dbrole_readonly] ,comment: role for global read-write access }
- { name: dbrole_admin ,login: false ,roles: [pg_monitor, dbrole_readwrite] ,comment: role for object creation }
- { name: postgres ,superuser: true ,comment: system superuser }
- { name: replicator ,replication: true ,roles: [pg_monitor, dbrole_readonly] ,comment: system replicator }
- { name: dbuser_dba ,superuser: true ,roles: [dbrole_admin] ,pgbouncer: true ,pool_mode: session, pool_connlimit: 16 , comment: pgsql admin user }
- { name: dbuser_monitor ,roles: [pg_monitor, dbrole_readonly] ,pgbouncer: true ,parameters: {log_min_duration_statement: 1000 } ,pool_mode: session ,pool_connlimit: 8 ,comment: pgsql monitor user }
pg_default_privileges: # default privileges when created by admin user
- GRANT USAGE ON SCHEMAS TO dbrole_readonly
- GRANT SELECT ON TABLES TO dbrole_readonly
- GRANT SELECT ON SEQUENCES TO dbrole_readonly
- GRANT EXECUTE ON FUNCTIONS TO dbrole_readonly
- GRANT USAGE ON SCHEMAS TO dbrole_offline
- GRANT SELECT ON TABLES TO dbrole_offline
- GRANT SELECT ON SEQUENCES TO dbrole_offline
- GRANT EXECUTE ON FUNCTIONS TO dbrole_offline
- GRANT INSERT ON TABLES TO dbrole_readwrite
- GRANT UPDATE ON TABLES TO dbrole_readwrite
- GRANT DELETE ON TABLES TO dbrole_readwrite
- GRANT USAGE ON SEQUENCES TO dbrole_readwrite
- GRANT UPDATE ON SEQUENCES TO dbrole_readwrite
- GRANT TRUNCATE ON TABLES TO dbrole_admin
- GRANT REFERENCES ON TABLES TO dbrole_admin
- GRANT TRIGGER ON TABLES TO dbrole_admin
- GRANT CREATE ON SCHEMAS TO dbrole_admin
pg_default_schemas: [ monitor ] # default schemas to be created
pg_default_extensions: # default extensions to be created
- { name: adminpack ,schema: pg_catalog }
- { name: pg_stat_statements ,schema: monitor }
- { name: pgstattuple ,schema: monitor }
- { name: pg_buffercache ,schema: monitor }
- { name: pageinspect ,schema: monitor }
- { name: pg_prewarm ,schema: monitor }
- { name: pg_visibility ,schema: monitor }
- { name: pg_freespacemap ,schema: monitor }
- { name: postgres_fdw ,schema: public }
- { name: file_fdw ,schema: public }
- { name: btree_gist ,schema: public }
- { name: btree_gin ,schema: public }
- { name: pg_trgm ,schema: public }
- { name: intagg ,schema: public }
- { name: intarray ,schema: public }
- { name: pg_repack }
pg_reload: true # reload postgres after hba changes
pg_default_hba_rules: # postgres default host-based authentication rules
- {user: '${dbsu}' ,db: all ,addr: local ,auth: ident ,title: 'dbsu access via local os user ident' }
- {user: '${dbsu}' ,db: replication ,addr: local ,auth: ident ,title: 'dbsu replication from local os ident' }
- {user: '${repl}' ,db: replication ,addr: localhost ,auth: pwd ,title: 'replicator replication from localhost'}
- {user: '${repl}' ,db: replication ,addr: intra ,auth: pwd ,title: 'replicator replication from intranet' }
- {user: '${repl}' ,db: postgres ,addr: intra ,auth: pwd ,title: 'replicator postgres db from intranet' }
- {user: '${monitor}' ,db: all ,addr: localhost ,auth: pwd ,title: 'monitor from localhost with password' }
- {user: '${monitor}' ,db: all ,addr: infra ,auth: pwd ,title: 'monitor from infra host with password'}
- {user: '${admin}' ,db: all ,addr: infra ,auth: ssl ,title: 'admin @ infra nodes with pwd & ssl' }
- {user: '${admin}' ,db: all ,addr: world ,auth: cert ,title: 'admin @ everywhere with ssl & cert' }
- {user: '+dbrole_readonly',db: all ,addr: localhost ,auth: pwd ,title: 'pgbouncer read/write via local socket'}
- {user: '+dbrole_readonly',db: all ,addr: intra ,auth: pwd ,title: 'read/write biz user via password' }
- {user: '+dbrole_offline' ,db: all ,addr: intra ,auth: pwd ,title: 'allow etl offline tasks from intranet'}
pgb_default_hba_rules: # pgbouncer default host-based authentication rules
- {user: '${dbsu}' ,db: pgbouncer ,addr: local ,auth: peer ,title: 'dbsu local admin access with os ident'}
- {user: 'all' ,db: all ,addr: localhost ,auth: pwd ,title: 'allow all user local access with pwd' }
- {user: '${monitor}' ,db: pgbouncer ,addr: intra ,auth: pwd ,title: 'monitor access via intranet with pwd' }
- {user: '${monitor}' ,db: all ,addr: world ,auth: deny ,title: 'reject all other monitor access addr' }
- {user: '${admin}' ,db: all ,addr: intra ,auth: pwd ,title: 'admin access via intranet with pwd' }
- {user: '${admin}' ,db: all ,addr: world ,auth: deny ,title: 'reject all other admin access addr' }
- {user: 'all' ,db: all ,addr: intra ,auth: pwd ,title: 'allow all user intra access with pwd' }
pg_default_service_dest: pgbouncer # default service destination if svc.dest='default'
pg_default_services: # postgres default service definitions
- { name: primary ,port: 5433 ,dest: default ,check: /primary ,selector: "[]" }
- { name: replica ,port: 5434 ,dest: default ,check: /read-only ,selector: "[]" , backup: "[? pg_role == `primary` || pg_role == `offline` ]" }
- { name: default ,port: 5436 ,dest: postgres ,check: /primary ,selector: "[]" }
- { name: offline ,port: 5438 ,dest: postgres ,check: /replica ,selector: "[? pg_role == `offline` || pg_offline_query ]" , backup: "[? pg_role == `replica` && !pg_offline_query]"}
name: pg_init
, type: string
, level: G/C
provision init script for cluster template, pg-init
by default
default values: pg-init
name: pg_default_roles
, type: role[]
, level: G/C
default roles and users in postgres cluster
default value:
pg_default_roles: # default roles and users in postgres cluster
- { name: dbrole_readonly ,login: false ,comment: role for global read-only access }
- { name: dbrole_offline ,login: false ,comment: role for restricted read-only access }
- { name: dbrole_readwrite ,login: false ,roles: [dbrole_readonly] ,comment: role for global read-write access }
- { name: dbrole_admin ,login: false ,roles: [pg_monitor, dbrole_readwrite] ,comment: role for object creation }
- { name: postgres ,superuser: true ,comment: system superuser }
- { name: replicator ,replication: true ,roles: [pg_monitor, dbrole_readonly] ,comment: system replicator }
- { name: dbuser_dba ,superuser: true ,roles: [dbrole_admin] ,pgbouncer: true ,pool_mode: session, pool_connlimit: 16 , comment: pgsql admin user }
- { name: dbuser_monitor ,roles: [pg_monitor, dbrole_readonly] ,pgbouncer: true ,parameters: {log_min_duration_statement: 1000 } ,pool_mode: session ,pool_connlimit: 8 ,comment: pgsql monitor user }
name: pg_default_privileges
, type: string[]
, level: G/C
default privileges when created by admin user
default value:
pg_default_privileges: # default privileges when created by admin user
- GRANT USAGE ON SCHEMAS TO dbrole_readonly
- GRANT SELECT ON TABLES TO dbrole_readonly
- GRANT SELECT ON SEQUENCES TO dbrole_readonly
- GRANT EXECUTE ON FUNCTIONS TO dbrole_readonly
- GRANT USAGE ON SCHEMAS TO dbrole_offline
- GRANT SELECT ON TABLES TO dbrole_offline
- GRANT SELECT ON SEQUENCES TO dbrole_offline
- GRANT EXECUTE ON FUNCTIONS TO dbrole_offline
- GRANT INSERT ON TABLES TO dbrole_readwrite
- GRANT UPDATE ON TABLES TO dbrole_readwrite
- GRANT DELETE ON TABLES TO dbrole_readwrite
- GRANT USAGE ON SEQUENCES TO dbrole_readwrite
- GRANT UPDATE ON SEQUENCES TO dbrole_readwrite
- GRANT TRUNCATE ON TABLES TO dbrole_admin
- GRANT REFERENCES ON TABLES TO dbrole_admin
- GRANT TRIGGER ON TABLES TO dbrole_admin
- GRANT CREATE ON SCHEMAS TO dbrole_admin
name: pg_default_schemas
, type: string[]
, level: G/C
default schemas to be created
default values: [ monitor ]
name: pg_default_extensions
, type: extension[]
, level: G/C
default extensions to be created
default value:
pg_default_extensions: # default extensions to be created
- { name: adminpack ,schema: pg_catalog }
- { name: pg_stat_statements ,schema: monitor }
- { name: pgstattuple ,schema: monitor }
- { name: pg_buffercache ,schema: monitor }
- { name: pageinspect ,schema: monitor }
- { name: pg_prewarm ,schema: monitor }
- { name: pg_visibility ,schema: monitor }
- { name: pg_freespacemap ,schema: monitor }
- { name: postgres_fdw ,schema: public }
- { name: file_fdw ,schema: public }
- { name: btree_gist ,schema: public }
- { name: btree_gin ,schema: public }
- { name: pg_trgm ,schema: public }
- { name: intagg ,schema: public }
- { name: intarray ,schema: public }
- { name: pg_repack }
name: pg_reload
, type: bool
, level: A
reload postgres after hba changes
default value is true
name: pg_default_hba_rules
, type: hba[]
, level: G/C
postgres default host-based authentication rules, array of hba rule object.
default value:
pg_default_hba_rules: # postgres default host-based authentication rules
- {user: '${dbsu}' ,db: all ,addr: local ,auth: ident ,title: 'dbsu access via local os user ident' }
- {user: '${dbsu}' ,db: replication ,addr: local ,auth: ident ,title: 'dbsu replication from local os ident' }
- {user: '${repl}' ,db: replication ,addr: localhost ,auth: pwd ,title: 'replicator replication from localhost'}
- {user: '${repl}' ,db: replication ,addr: intra ,auth: pwd ,title: 'replicator replication from intranet' }
- {user: '${repl}' ,db: postgres ,addr: intra ,auth: pwd ,title: 'replicator postgres db from intranet' }
- {user: '${monitor}' ,db: all ,addr: localhost ,auth: pwd ,title: 'monitor from localhost with password' }
- {user: '${monitor}' ,db: all ,addr: infra ,auth: pwd ,title: 'monitor from infra host with password'}
- {user: '${admin}' ,db: all ,addr: infra ,auth: ssl ,title: 'admin @ infra nodes with pwd & ssl' }
- {user: '${admin}' ,db: all ,addr: world ,auth: cert ,title: 'admin @ everywhere with ssl & cert' }
- {user: '+dbrole_readonly',db: all ,addr: localhost ,auth: pwd ,title: 'pgbouncer read/write via local socket'}
- {user: '+dbrole_readonly',db: all ,addr: intra ,auth: pwd ,title: 'read/write biz user via password' }
- {user: '+dbrole_offline' ,db: all ,addr: intra ,auth: pwd ,title: 'allow etl offline tasks from intranet'}
name: pgb_default_hba_rules
, type: hba[]
, level: G/C
pgbouncer default host-based authentication rules, array of hba rule objects.
default value:
pgb_default_hba_rules: # pgbouncer default host-based authentication rules
- {user: '${dbsu}' ,db: pgbouncer ,addr: local ,auth: peer ,title: 'dbsu local admin access with os ident'}
- {user: 'all' ,db: all ,addr: localhost ,auth: pwd ,title: 'allow all user local access with pwd' }
- {user: '${monitor}' ,db: pgbouncer ,addr: intra ,auth: pwd ,title: 'monitor access via intranet with pwd' }
- {user: '${monitor}' ,db: all ,addr: world ,auth: deny ,title: 'reject all other monitor access addr' }
- {user: '${admin}' ,db: all ,addr: intra ,auth: pwd ,title: 'admin access via intranet with pwd' }
- {user: '${admin}' ,db: all ,addr: world ,auth: deny ,title: 'reject all other admin access addr' }
- {user: 'all' ,db: all ,addr: intra ,auth: pwd ,title: 'allow all user intra access with pwd' }
name: pg_default_service_dest
, type: enum
, level: G/C
default service destination if svc.dest='default'
default values: pgbouncer
name: pg_default_services
, type: service[]
, level: G/C
postgres default service definitions
default value:
pg_default_services: # postgres default service definitions
- { name: primary ,port: 5433 ,dest: default ,check: /primary ,selector: "[]" }
- { name: replica ,port: 5434 ,dest: default ,check: /read-only ,selector: "[]" , backup: "[? pg_role == `primary` || pg_role == `offline` ]" }
- { name: default ,port: 5436 ,dest: postgres ,check: /primary ,selector: "[]" }
- { name: offline ,port: 5438 ,dest: postgres ,check: /replica ,selector: "[? pg_role == `offline` || pg_offline_query ]" , backup: "[? pg_role == `replica` && !pg_offline_query]"}
pgbackrest_enabled: true # enable pgbackrest on pgsql host?
pgbackrest_clean: true # remove pg backup data during init?
pgbackrest_log_dir: /pg/log/pgbackrest # pgbackrest log dir, `/pg/log/pgbackrest` by default
pgbackrest_method: local # pgbackrest repo method: local,minio,[user-defined...]
pgbackrest_repo: # pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
local: # default pgbackrest repo with local posix fs
path: /pg/backup # local backup directory, `/pg/backup` by default
retention_full_type: count # retention full backups by count
retention_full: 2 # keep 2, at most 3 full backup when using local fs repo
minio: # optional minio repo for pgbackrest
type: s3 # minio is s3-compatible, so s3 is used
s3_endpoint: sss.pigsty # minio endpoint domain name, `sss.pigsty` by default
s3_region: us-east-1 # minio region, us-east-1 by default, useless for minio
s3_bucket: pgsql # minio bucket name, `pgsql` by default
s3_key: pgbackrest # minio user access key for pgbackrest
s3_key_secret: S3User.Backup # minio user secret key for pgbackrest
s3_uri_style: path # use path style uri for minio rather than host style
path: /pgbackrest # minio backup path, default is `/pgbackrest`
storage_port: 9000 # minio port, 9000 by default
storage_ca_file: /etc/pki/ca.crt # minio ca file path, `/etc/pki/ca.crt` by default
bundle: y # bundle small files into a single file
cipher_type: aes-256-cbc # enable AES encryption for remote backup repo
cipher_pass: pgBackRest # AES encryption password, default is 'pgBackRest'
retention_full_type: time # retention full backup by time on minio repo
retention_full: 14 # keep full backup for last 14 days
name: pgbackrest_enabled
, type: bool
, level: C
enable pgbackrest on pgsql host?
default value is true
name: pgbackrest_clean
, type: bool
, level: C
remove pg backup data during init?
default value is true
name: pgbackrest_log_dir
, type: path
, level: C
pgbackrest log dir, /pg/log/pgbackrest
by default
default values: /pg/log/pgbackrest
name: pgbackrest_method
, type: enum
, level: C
pgbackrest repo method: local,minio,[user-defined...]
default values: local
name: pgbackrest_repo
, type: dict
, level: G/C
pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
default value:
pgbackrest_repo: # pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
local: # default pgbackrest repo with local posix fs
path: /pg/backup # local backup directory, `/pg/backup` by default
retention_full_type: count # retention full backups by count
retention_full: 2 # keep 2, at most 3 full backup when using local fs repo
minio: # optional minio repo for pgbackrest
type: s3 # minio is s3-compatible, so s3 is used
s3_endpoint: sss.pigsty # minio endpoint domain name, `sss.pigsty` by default
s3_region: us-east-1 # minio region, us-east-1 by default, useless for minio
s3_bucket: pgsql # minio bucket name, `pgsql` by default
s3_key: pgbackrest # minio user access key for pgbackrest
s3_key_secret: S3User.Backup # minio user secret key for pgbackrest
s3_uri_style: path # use path style uri for minio rather than host style
path: /pgbackrest # minio backup path, default is `/pgbackrest`
storage_port: 9000 # minio port, 9000 by default
storage_ca_file: /etc/pki/ca.crt # minio ca file path, `/etc/pki/ca.crt` by default
bundle: y # bundle small files into a single file
cipher_type: aes-256-cbc # enable AES encryption for remote backup repo
cipher_pass: pgBackRest # AES encryption password, default is 'pgBackRest'
retention_full_type: time # retention full backup by time on minio repo
retention_full: 14 # keep full backup for last 14 days
pg_vip_enabled: false # enable a l2 vip for pgsql primary? false by default
pg_vip_address: 127.0.0.1/24 # vip address in `<ipv4>/<mask>` format, required if vip is enabled
pg_vip_interface: eth0 # vip network interface to listen, eth0 by default
name: pg_vip_enabled
, type: bool
, level: C
enable a l2 vip for pgsql primary? false by default
default value is false
name: pg_vip_address
, type: cidr4
, level: C
vip address in <ipv4>/<mask>
format, required if vip is enabled
default values: 127.0.0.1/24
name: pg_vip_interface
, type: string
, level: C/I
vip network interface to listen, eth0 by default
default values: eth0
pg_dns_suffix: '' # pgsql dns suffix, '' by default
pg_dns_target: auto # auto, primary, vip, none, or ad hoc ip
name: pg_dns_suffix
, type: string
, level: C
pgsql dns suffix, '' by default
default value is empty string
name: pg_dns_target
, type: enum
, level: C
auto, primary, vip, none, or ad hoc ip
default values: auto
pg_exporter_enabled: true # enable pg_exporter on pgsql hosts?
pg_exporter_config: pg_exporter.yml # pg_exporter configuration file name
pg_exporter_cache_ttls: '1,10,60,300' # pg_exporter collector ttl stage in seconds, '1,10,60,300' by default
pg_exporter_port: 9630 # pg_exporter listen port, 9630 by default
pg_exporter_params: 'sslmode=disable' # extra url parameters for pg_exporter dsn
pg_exporter_url: '' # overwrite auto-generate pg dsn if specified
pg_exporter_auto_discovery: true # enable auto database discovery? enabled by default
pg_exporter_exclude_database: 'template0,template1,postgres' # csv of database that WILL NOT be monitored during auto-discovery
pg_exporter_include_database: '' # csv of database that WILL BE monitored during auto-discovery
pg_exporter_connect_timeout: 200 # pg_exporter connect timeout in ms, 200 by default
pg_exporter_options: '' # overwrite extra options for pg_exporter
pgbouncer_exporter_enabled: true # enable pgbouncer_exporter on pgsql hosts?
pgbouncer_exporter_port: 9631 # pgbouncer_exporter listen port, 9631 by default
pgbouncer_exporter_url: '' # overwrite auto-generate pgbouncer dsn if specified
pgbouncer_exporter_options: '' # overwrite extra options for pgbouncer_exporter
name: pg_exporter_enabled
, type: bool
, level: C
enable pg_exporter on pgsql hosts?
default value is true
name: pg_exporter_config
, type: string
, level: C
pg_exporter configuration file name
default values: pg_exporter.yml
name: pg_exporter_cache_ttls
, type: string
, level: C
pg_exporter collector ttl stage in seconds, '1,10,60,300' by default
default values: 1,10,60,300
name: pg_exporter_port
, type: port
, level: C
pg_exporter listen port, 9630 by default
default values: 9630
name: pg_exporter_params
, type: string
, level: C
extra url parameters for pg_exporter dsn
default values: sslmode=disable
name: pg_exporter_url
, type: pgurl
, level: C
overwrite auto-generate pg dsn if specified
default value is empty string
name: pg_exporter_auto_discovery
, type: bool
, level: C
enable auto database discovery? enabled by default
default value is true
name: pg_exporter_exclude_database
, type: string
, level: C
csv of database that WILL NOT be monitored during auto-discovery
default values: template0,template1,postgres
name: pg_exporter_include_database
, type: string
, level: C
csv of database that WILL BE monitored during auto-discovery
default value is empty string
name: pg_exporter_connect_timeout
, type: int
, level: C
pg_exporter connect timeout in ms, 200 by default
default values: 200
name: pg_exporter_options
, type: arg
, level: C
overwrite extra options for pg_exporter
default value is empty string
name: pgbouncer_exporter_enabled
, type: bool
, level: C
enable pgbouncer_exporter on pgsql hosts?
default value is true
name: pgbouncer_exporter_port
, type: port
, level: C
pgbouncer_exporter listen port, 9631 by default
default values: 9631
name: pgbouncer_exporter_url
, type: pgurl
, level: C
overwrite auto-generate pgbouncer dsn if specified
default value is empty string
name: pgbouncer_exporter_options
, type: arg
, level: C
overwrite extra options for pgbouncer_exporter
default value is empty string
#redis_cluster: <CLUSTER> # redis cluster name, required identity parameter
#redis_node: 1 <NODE> # redis node sequence number, node int id required
#redis_instances: {} <NODE> # redis instances definition on this redis node
name: redis_instances
, type: dict
, level: I
redis instances definition on this redis node
no default value
name: redis_node
, type: int
, level: I
redis node sequence number, node int id required
no default value
name: redis_cluster
, type: string
, level: C
redis cluster name, required identity parameter
no default value
redis_fs_main: /data # redis main data mountpoint, `/data` by default
redis_exporter_enabled: true # install redis exporter on redis nodes?
redis_exporter_port: 9121 # redis exporter listen port, 9121 by default
redis_exporter_options: '' # cli args and extra options for redis exporter
name: redis_fs_main
, type: path
, level: C
redis main data mountpoint, /data
by default
default values: /data
name: redis_exporter_enabled
, type: bool
, level: C
install redis exporter on redis nodes?
default value is true
name: redis_exporter_port
, type: port
, level: C
redis exporter listen port, 9121 by default
default values: 9121
name: redis_exporter_options
, type: string
, level: C/I
cli args and extra options for redis exporter
default value is empty string
redis_safeguard: false # prevent purging running redis instance?
redis_clean: true # purge existing redis during init?
redis_rmdata: true # remove redis data when purging redis server?
redis_mode: standalone # redis mode: standalone,cluster,sentinel
redis_conf: redis.conf # redis config template path, except sentinel
redis_bind_address: '0.0.0.0' # redis bind address, empty string will use host ip
redis_max_memory: 1GB # max memory used by each redis instance
redis_mem_policy: allkeys-lru # redis memory eviction policy
redis_password: '' # redis password, empty string will disable password
redis_rdb_save: ['1200 1'] # redis rdb save directives, disable with empty list
redis_aof_enabled: false # enable redis append only file?
redis_rename_commands: {} # rename redis dangerous commands
redis_cluster_replicas: 1 # replica number for one master in redis cluster
name: redis_safeguard
, type: bool
, level: C
prevent purging running redis instance?
default value is false
name: redis_clean
, type: bool
, level: C
purge existing redis during init?
default value is true
name: redis_rmdata
, type: bool
, level: A
remove redis data when purging redis server?
default value is true
name: redis_mode
, type: enum
, level: C
redis mode: standalone,cluster,sentinel
default values: standalone
- standalone: setup redis as standalone (master-slave) mode
- cluster: setup this redis cluster as a redis native cluster
- sentinel: setup redis as sentinel for standalone redis HA
name: redis_conf
, type: string
, level: C
redis config template path, except sentinel
default values: redis.conf
name: redis_bind_address
, type: ip
, level: C
redis bind address, empty string will use host ip
default values: 0.0.0.0
name: redis_max_memory
, type: size
, level: C/I
max memory used by each redis instance
default values: 1GB
name: redis_mem_policy
, type: enum
, level: C
redis memory eviction policy
default values: allkeys-lru
name: redis_password
, type: password
, level: C
redis password, empty string will disable password
default value is empty string
name: redis_rdb_save
, type: string[]
, level: C
redis rdb save directives, disable with empty list
default value:
["1200 1"]
name: redis_aof_enabled
, type: bool
, level: C
enable redis append only file?
default value is false
name: redis_rename_commands
, type: dict
, level: C
rename redis dangerous commands
default values: {}
name: redis_cluster_replicas
, type: int
, level: C
replica number for one master in redis cluster
default values: 1