Demo: Public Demo
Pigsty 4-node public demo config
---
######################################################################
# File : pigsty.yml
# Desc : Pigsty 4-node public demo config
# Link : https://pigsty.cc/#/v-config
# Ctime : 2020-05-22
# Mtime : 2022-05-22
# Copyright (C) 2018-2022 Ruohang Feng (rh@vonng.com)
######################################################################
######################################################################
# Sandbox (4-node) #
#====================================================================#
# admin user : vagrant (nopass ssh & sudo already set) #
# 1. meta : 10.10.10.10 (2 Core | 4GB) pg-meta #
# 2. node-1 : 10.10.10.11 (1 Core | 1GB) pg-test-1 #
# 3. node-2 : 10.10.10.12 (1 Core | 1GB) pg-test-2 #
# 4. node-3 : 10.10.10.13 (1 Core | 1GB) pg-test-3 #
# (replace these ips if your 4-node env has different ip addrs)     #
# VIP : (l2 vip is available inside the same LAN)                   #
# pg-meta ---> 10.10.10.2 ---> 10.10.10.10 #
# pg-test ---> 10.10.10.3 ---> 10.10.10.1{1,2,3} #
######################################################################
all:
##################################################################
# CLUSTERS #
##################################################################
# meta nodes, pgsql clusters, and redis clusters are defined as
# k:v pairs inside `all.children`, where the key is the cluster name
# and the value is the cluster definition, consisting of two parts:
# `hosts`: cluster members (ips) and instance-level variables
# `vars` : cluster-level variables
##################################################################
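# For example, a minimal (hypothetical, not part of this demo) cluster entry
# would look like:
#
#   pg-example:                       # <- cluster name as group key
#     hosts:                          # <- cluster members & instance-level vars
#       10.10.10.21: { pg_seq: 1, pg_role: primary }
#     vars:                           # <- cluster-level vars
#       pg_cluster: pg-example
#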
children: # groups definition
#================================================================#
# Meta Nodes: Admin Controller #
#================================================================#
meta: # meta nodes are defined in this special group "meta"
vars:
meta_node: true # mark this group as meta nodes
ansible_group_priority: 99 # overwrite with the highest priority
nginx_enabled: true # setup repo & underlying nginx
nameserver_enabled: false # setup dnsmasq
prometheus_enabled: true # setup prometheus
grafana_enabled: true # setup grafana
loki_enabled: true # setup loki
docker_enabled: true # setup docker
dcs_safeguard: false # protect dcs server from fat fingers
hosts: # add nodes to group 'meta'
10.10.10.10: { }
# 10.10.10.11: { nginx_enabled: false }
# 10.10.10.12: { nginx_enabled: false }
#================================================================#
# PGSQL Clusters #
#================================================================#
#----------------------------------#
# pgsql cluster: pg-meta (CMDB) #
#----------------------------------#
pg-meta:
hosts: { 10.10.10.10: { pg_seq: 1, pg_role: primary , pg_offline_query: true } }
vars:
pg_cluster: pg-meta
patroni_watchdog_mode: off
pg_databases:
- { name: meta, baseline: cmdb.sql , schemas: [ pigsty ] , extensions: [ { name: adminpack, schema: pg_catalog } , { name: postgis, schema: public } , { name: timescaledb } ] ,comment: pigsty meta database }
- { name: grafana, owner: dbuser_grafana , revokeconn: true , comment: grafana primary database }
- { name: bytebase, owner: dbuser_bytebase , revokeconn: true , comment: bytebase primary database }
- { name: kong, owner: dbuser_kong , revokeconn: true , comment: kong the api gateway database }
- { name: gitea, owner: dbuser_gitea , revokeconn: true , comment: gitea meta database }
pg_users:
- {name: dbuser_meta ,password: DBUser.Meta ,pgbouncer: true ,roles: [dbrole_admin] ,comment: pigsty cmdb admin user }
- {name: dbuser_view ,password: DBUser.Viewer ,pgbouncer: true ,roles: [dbrole_readonly] ,comment: read-only viewer for meta database }
- {name: dbuser_grafana ,password: DBUser.Grafana ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for grafana database }
- {name: dbuser_bytebase ,password: DBUser.Bytebase ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for bytebase database }
- {name: dbuser_kong ,password: DBUser.Kong ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for kong api gateway }
- {name: dbuser_gitea ,password: DBUser.Gitea ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for gitea service }
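# with the users above, the cmdb can be reached directly or via pgbouncer,
# e.g. (illustrative, assuming default ports 5432/6432):
#   psql postgres://dbuser_meta:DBUser.Meta@10.10.10.10:5432/meta
#   psql postgres://dbuser_meta:DBUser.Meta@10.10.10.10:6432/meta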
pg_hba_rules_extra:
- title: allow grafana bytebase kong intranet access
role: common
rules:
- host kong dbuser_kong 10.0.0.0/8 md5
- host bytebase dbuser_bytebase 10.0.0.0/8 md5
- host grafana dbuser_grafana 10.0.0.0/8 md5
vip_mode: l2
vip_address: 10.10.10.2
vip_cidrmask: 8
vip_interface: eth1
node_crontab:
- '00 01 * * * postgres /pg/bin/pg-backup 2>>/pg/log/backup.log'
- '00 00 * * * root bash -c "cd /root/pigsty && make ri >> /tmp/bench.log"'
- '00 00 * * * root bash -c "cd /root/pigsty && make test-ri >> /tmp/bench.log"'
- '* * * * * root /usr/pgsql/bin/pgbench -nv -P1 -c4 --rate=32 -T20 postgres://dbuser_meta:DBUser.Meta@meta:5433/meta 2>>/tmp/bench.log'
- '* * * * * root /usr/pgsql/bin/pgbench -nv -P1 -c4 --rate=16 -T30 postgres://test:test@pg-test:5433/test 2>>/tmp/bench.log'
- '* * * * * root /usr/pgsql/bin/pgbench --select-only -nv -P1 -c4 --rate=128 -T40 postgres://test:test@pg-test:5434/test 2>>/tmp/bench.log'
- |
* * * * * root bash -c "cat /pg/data/log/postgresql-*.csv | psql service=meta -AXtwc 'TRUNCATE pglog.sample CASCADE; COPY pglog.sample14 FROM STDIN CSV;'"
- '00 01 * * * root /root/pigsty/bin/schemaspy 10.10.10.10 meta pigsty 2>> /tmp/schemaspy.log'
- '00 * * * * root /root/pigsty/bin/pglog-summary 10.10.10.10 2>> /tmp/pgbadger.log'
- '00 * * * * root /root/pigsty/bin/pglog-summary 10.10.10.11 2>> /tmp/pgbadger.log'
- '00 * * * * root /root/pigsty/bin/pglog-summary 10.10.10.12 2>> /tmp/pgbadger.log'
- '00 * * * * root /root/pigsty/bin/pglog-summary 10.10.10.13 2>> /tmp/pgbadger.log'
- '00 01 * * * root bash -c "cd /root/app/covid && make reload 2>> /tmp/covid.log"'
#----------------------------------#
# pgsql cluster: pg-test (3 nodes) #
#----------------------------------#
# pg-test ---> 10.10.10.3 ---> 10.10.10.1{1,2,3}
pg-test: # define the new 3-node cluster pg-test
hosts:
10.10.10.11: { pg_seq: 1, pg_role: primary } # primary instance, leader of cluster
10.10.10.12: { pg_seq: 2, pg_role: replica } # replica instance, follower of leader
10.10.10.13: { pg_seq: 3, pg_role: replica, pg_offline_query: true } # replica with offline access
vars:
pg_cluster: pg-test # define pgsql cluster name
pg_version: 14 # test postgresql 14 with pg-test cluster
pg_users:
- name: test
password: test
pgbouncer: true
roles: [ dbrole_admin ]
pg_databases: # create a database and user named 'test'
- name: test
vip_mode: l2 # enable/disable vip (require members in same LAN)
vip_address: 10.10.10.3 # virtual ip address for this cluster
vip_cidrmask: 8 # cidr network mask length
vip_interface: eth1 # interface to add virtual ip
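# with the l2 vip bound, cluster services are also reachable via 10.10.10.3,
# e.g. (illustrative, assuming the default service ports 5433/5434):
#   psql postgres://test:test@10.10.10.3:5433/test   # primary service via vip
#   psql postgres://test:test@10.10.10.3:5434/test   # replica service via vip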
pg_services_extra: # extra services in addition to pg_services_default, array of service definition
# standby service will route {ip|name}:5435 to sync replica's pgbouncer (5435->6432 standby)
- name: standby # required, service name, the actual svc name will be prefixed with `pg_cluster`, e.g: pg-meta-standby
src_ip: "*" # required, service bind ip address, `*` for all ip, `vip` for cluster `vip_address`
src_port: 5435 # required, service exposed port (work as kubernetes service node port mode)
dst_port: pgbouncer # optional, destination port, postgres|pgbouncer|<port_number> , pgbouncer(6432) by default
check_method: http # optional, health check method: http is the only available method for now
check_port: patroni # optional, health check port: patroni|pg_exporter|<port_number> , patroni(8008) by default
check_url: /sync # optional, health check url path, /read-only?lag=0 by default
check_code: 200 # optional, health check expected http code, 200 by default
selector: "[]" # required, JMESPath to filter inventory ()
selector_backup: "[? pg_role == `primary`]" # primary used as backup server for standby service (will not work because /sync for )
haproxy: # optional, adhoc parameters for haproxy service provider (vip_l4 is another service provider)
maxconn: 3000 # optional, max allowed front-end connection
balance: roundrobin # optional, haproxy load balance algorithm (roundrobin by default, other: leastconn)
default_server_options: 'inter 3s fastinter 1s downinter 5s rise 3 fall 3 on-marked-down shutdown-sessions slowstart 30s maxconn 3000 maxqueue 128 weight 100'
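# once deployed, the standby service above can be tested with, e.g.
# (illustrative): psql postgres://test:test@pg-test:5435/test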
#================================================================#
# REDIS Clusters #
#================================================================#
#----------------------------------#
# redis sentinel example #
#----------------------------------#
redis-meta:
hosts:
10.10.10.10:
redis_node: 1
redis_instances: { 6001 : {} ,6002 : {} , 6003 : {} }
vars:
redis_cluster: redis-meta
redis_mode: sentinel
redis_max_memory: 64MB
#----------------------------------#
# redis cluster example #
#----------------------------------#
redis-test:
hosts:
10.10.10.11:
redis_node: 1
redis_instances: { 6501 : {} ,6502 : {} ,6503 : {} ,6504 : {} ,6505 : {} ,6506 : {} }
10.10.10.12:
redis_node: 2
redis_instances: { 6501 : {} ,6502 : {} ,6503 : {} ,6504 : {} ,6505 : {} ,6506 : {} }
vars:
redis_cluster: redis-test # name of this redis 'cluster'
redis_mode: cluster # standalone,cluster,sentinel
redis_max_memory: 32MB # max memory used by each redis instance
redis_mem_policy: allkeys-lru # memory eviction policy
#----------------------------------#
# redis standalone example #
#----------------------------------#
redis-common:
hosts:
10.10.10.13:
redis_node: 1
redis_instances:
6501: {}
6502: { replica_of: '10.10.10.13 6501' }
6503: { replica_of: '10.10.10.13 6501' }
vars:
redis_cluster: redis-common # name of this redis 'cluster'
redis_mode: standalone # standalone,cluster,sentinel
redis_max_memory: 64MB # max memory used by each redis instance
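# the replica_of wiring can be verified after provisioning, e.g. (illustrative):
#   redis-cli -h 10.10.10.13 -p 6502 info replication   # expect master 10.10.10.13:6501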
####################################################################
# VARS #
####################################################################
vars: # global variables
version: v1.5.1 # pigsty version string
#================================================================#
# VARS: INFRA #
#================================================================#
#-----------------------------------------------------------------
# CONNECT
#-----------------------------------------------------------------
# INSTANCE level ansible connect parameters
# ansible_user: vagrant # which user to be used (when connecting to remote nodes)
# ansible_port: 22 # which port to be used (when connecting to remote nodes)
# ansible_host: meta # ssh alias host name (when connecting to remote nodes)
# ansible_ssh_private_key_file: ~/.ssh/id_rsa # ssh private key file
# ansible_ssh_common_args: '-o StrictHostKeyChecking=no' # ssh common args
proxy_env: # global proxy env when downloading packages
no_proxy: "localhost,127.0.0.1,10.0.0.0/8,192.168.0.0/16,*.pigsty,*.aliyun.com,mirrors.*,*.myqcloud.com,*.tsinghua.edu.cn"
# http_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
# https_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
# all_proxy: # set your proxy here: e.g http://user:pass@proxy.xxx.com
#-----------------------------------------------------------------
# CA
#-----------------------------------------------------------------
ca_method: create # create|copy|recreate
ca_subject: "/CN=root-ca" # self-signed CA subject
ca_homedir: /ca # ca cert directory
ca_cert: ca.crt # ca public key/cert
ca_key: ca.key # ca private key
#-----------------------------------------------------------------
# NGINX
#-----------------------------------------------------------------
nginx_enabled: true # build local repo on this node
nginx_port: 80 # repo listen port, must match the port in repo_address
nginx_home: /www # default repo home dir
nginx_upstream: # domain names and upstream servers
- { name: home , domain: home.pigsty.cc , endpoint: "10.10.10.10:80" } # default -> index.html (80)
- { name: grafana , domain: demo.pigsty.cc , endpoint: "10.10.10.10:3000" } # pigsty grafana (3000)
- { name: loki , domain: l.pigsty.cc , endpoint: "10.10.10.10:3100" } # pigsty loki (3100)
- { name: prometheus , domain: p.pigsty.cc , endpoint: "10.10.10.10:9090" } # pigsty prometheus (9090)
- { name: alertmanager , domain: a.pigsty.cc , endpoint: "10.10.10.10:9093" } # pigsty alertmanager (9093)
- { name: consul , domain: c.pigsty.cc , endpoint: "127.0.0.1:8500" } # pigsty consul UI (8500) (domain required)
- { name: postgrest , domain: api.pigsty.cc , endpoint: "127.0.0.1:8884" } #== v optional ==#
- { name: pgadmin , domain: adm.pigsty.cc , endpoint: "127.0.0.1:8885" }
- { name: pgweb , domain: cli.pigsty.cc , endpoint: "127.0.0.1:8886" }
- { name: bytebase , domain: ddl.pigsty.cc , endpoint: "127.0.0.1:8887" }
- { name: jupyter , domain: lab.pigsty.cc , endpoint: "127.0.0.1:8888" }
- { name: gitea , domain: git.pigsty.cc , endpoint: "127.0.0.1:8889" }
- { name: minio , domain: sss.pigsty.cc , endpoint: "127.0.0.1:9000" }
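# nginx proxies by Host header; without dns records you can still probe an
# upstream with curl, e.g. (illustrative):
#   curl -H 'Host: p.pigsty.cc' http://10.10.10.10/   # -> prometheus on :9090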
nginx_indexes: # application nav links on home page
- { name: PgAdmin4 , url : 'http://adm.pigsty.cc' , comment: 'PgAdmin4 for PostgreSQL' }
- { name: PGWeb , url : 'http://cli.pigsty.cc' , comment: 'PGWEB Browser Client' }
- { name: ByteBase , url : 'http://ddl.pigsty.cc' , comment: 'ByteBase Schema Migrator' }
- { name: PostgREST , url : 'http://api.pigsty.cc' , comment: 'PostgREST API' }
- { name: Gitea , url : 'http://git.pigsty.cc' , comment: 'Gitea Git Service' }
- { name: Minio , url : 'http://sss.pigsty.cc' , comment: 'Minio Object Storage' }
- { name: Explain , url : '/pev2' , comment: 'postgres explain visualizer' }
- { name: Package , url : '/pigsty' , comment: 'local yum repo packages' }
- { name: Matrix Pkg , url : '/matrix' , comment: 'matrixdb repo packages' }
- { name: PG Logs , url : '/logs' , comment: 'postgres raw csv logs' }
- { name: Schemas , url : '/schema' , comment: 'schemaspy summary report' }
- { name: Reports , url : '/report' , comment: 'pgbadger summary report' }
- { name: ISD , url : '${grafana}/d/isd-overview' , comment: 'noaa isd data visualization' }
- { name: Covid , url : '${grafana}/d/covid-overview' , comment: 'covid data visualization' }
- { name: Worktime , url : '${grafana}/d/worktime-overview' , comment: 'worktime query' }
- { name: DBTrend , url : '${grafana}/d/dbeng-trending' , comment: 'DB Engine Trending Graph' }
#-----------------------------------------------------------------
# REPO
#-----------------------------------------------------------------
repo_name: pigsty # repo name, pigsty by default
repo_address: pigsty # external address to this repo (ip:port or url)
repo_rebuild: false # force re-download packages
repo_remove: true # remove existing upstream repo
repo_upstreams: # where to download packages?
- name: base
description: CentOS-$releasever - Base
gpgcheck: no
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/
- http://mirrors.aliyun.com/centos/$releasever/os/$basearch/
- http://mirror.centos.org/centos/$releasever/os/$basearch/
- name: updates
description: CentOS-$releasever - Updates
gpgcheck: no
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/
- http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/
- http://mirror.centos.org/centos/$releasever/updates/$basearch/
- name: extras
description: CentOS-$releasever - Extras
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/
- http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/
- http://mirror.centos.org/centos/$releasever/extras/$basearch/
gpgcheck: no
- name: epel
description: CentOS $releasever - epel
gpgcheck: no
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/epel/$releasever/$basearch
- http://mirrors.aliyun.com/epel/$releasever/$basearch
- http://download.fedoraproject.org/pub/epel/$releasever/$basearch
- name: grafana
description: Grafana Official Yum Repo
enabled: yes
gpgcheck: no
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/grafana/yum/rpm
- https://packages.grafana.com/oss/rpm
- name: prometheus
description: Prometheus and exporters
gpgcheck: no
baseurl: https://packagecloud.io/prometheus-rpm/release/el/$releasever/$basearch
- name: pgdg-common
description: PostgreSQL common RPMs for RHEL/CentOS $releasever - $basearch
gpgcheck: no
baseurl:
- http://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/common/redhat/rhel-$releasever-$basearch
- https://download.postgresql.org/pub/repos/yum/common/redhat/rhel-$releasever-$basearch
- name: pgdg14
description: PostgreSQL 14 for RHEL/CentOS $releasever - $basearch
gpgcheck: no
baseurl:
- https://mirrors.tuna.tsinghua.edu.cn/postgresql/repos/yum/14/redhat/rhel-$releasever-$basearch
- https://download.postgresql.org/pub/repos/yum/14/redhat/rhel-$releasever-$basearch
- name: timescaledb
description: TimescaleDB for RHEL/CentOS $releasever - $basearch
gpgcheck: no
baseurl:
- https://packagecloud.io/timescale/timescaledb/el/7/$basearch
- name: centos-sclo
description: CentOS-$releasever - SCLo
gpgcheck: no
#mirrorlist: http://mirrorlist.centos.org?arch=$basearch&release=$releasever&repo=sclo-sclo
baseurl: http://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/sclo/
- name: centos-sclo-rh
description: CentOS-$releasever - SCLo rh
gpgcheck: no
#mirrorlist: http://mirrorlist.centos.org?arch=$basearch&release=7&repo=sclo-rh
baseurl: http://mirrors.aliyun.com/centos/$releasever/sclo/$basearch/rh/
- name: nginx
description: Nginx Official Yum Repo
skip_if_unavailable: true
gpgcheck: no
baseurl: http://nginx.org/packages/centos/$releasever/$basearch/
- name: harbottle # for latest consul & kubernetes
description: Copr repo for main owned by harbottle
skip_if_unavailable: true
gpgcheck: no
baseurl: https://download.copr.fedorainfracloud.org/results/harbottle/main/epel-$releasever-$basearch/
- name: pg_probackup # for pg_probackup
description: PG_PROBACKUP Centos packages for PostgresPro Standard and Enterprise - $basearch
skip_if_unavailable: true
gpgcheck: no
baseurl: https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/centos-$releasever-$basearch
- name: docker-ce # for latest docker
description: Docker CE Stable - $basearch
skip_if_unavailable: true
gpgcheck: no
baseurl:
- https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
- https://download.docker.com/linux/centos/$releasever/$basearch/stable
repo_packages: # packages to be downloaded & included in the local repo
- epel-release nginx wget yum-utils yum createrepo sshpass zip unzip # ---- boot ---- #
- ntp chrony uuid lz4 bzip2 nc pv jq vim-enhanced make patch bash lsof wget git tuned perf ftp lrzsz rsync # ---- node ---- #
- numactl grubby sysstat dstat iotop bind-utils net-tools tcpdump socat ipvsadm telnet ca-certificates keepalived # ----- utils ----- #
- readline zlib openssl openssh-clients libyaml libxml2 libxslt libevent perl perl-devel perl-ExtUtils* # --- deps:pg --- #
- readline-devel zlib-devel uuid-devel libuuid-devel libxml2-devel libxslt-devel openssl-devel libicu-devel # --- deps:devel -- #
- grafana prometheus2 pushgateway alertmanager mtail consul consul_exporter consul-template etcd dnsmasq # ----- meta ----- #
- node_exporter nginx_exporter blackbox_exporter redis_exporter # ---- exporter --- #
- ansible python python-pip python-psycopg2 # - ansible & py3 - #
- python3 python3-psycopg2 python36-requests python3-etcd python3-consul python36-urllib3 python36-idna python36-pyOpenSSL python36-cryptography
- patroni patroni-consul patroni-etcd pgbouncer pg_cli pgbadger pg_activity tail_n_mail # -- pgsql common - #
- pgcenter boxinfo check_postgres emaj pgbconsole pg_bloat_check pgquarrel barman barman-cli pgloader pgFormatter pitrery pspg pgxnclient PyGreSQL
- postgresql14* postgis32_14* citus_14* pglogical_14* timescaledb-2-postgresql-14 pg_repack_14 wal2json_14 # -- pg14 packages -#
- pg_qualstats_14 pg_stat_kcache_14 pg_stat_monitor_14 pg_top_14 pg_track_settings_14 pg_wait_sampling_14 pg_probackup-std-14
- pg_statement_rollback_14 system_stats_14 plproxy_14 plsh_14 pldebugger_14 plpgsql_check_14 pgmemcache_14 # plr_14
- mysql_fdw_14 ogr_fdw_14 tds_fdw_14 sqlite_fdw_14 firebird_fdw_14 hdfs_fdw_14 mongo_fdw_14 osm_fdw_14 pgbouncer_fdw_14
- hypopg_14 geoip_14 rum_14 hll_14 ip4r_14 prefix_14 pguri_14 tdigest_14 topn_14 periods_14
- bgw_replstatus_14 count_distinct_14 credcheck_14 ddlx_14 extra_window_functions_14 logerrors_14 mysqlcompat_14 orafce_14
- repmgr_14 pg_auth_mon_14 pg_auto_failover_14 pg_background_14 pg_bulkload_14 pg_catcheck_14 pg_comparator_14
- pg_cron_14 pg_fkpart_14 pg_jobmon_14 pg_partman_14 pg_permissions_14 pg_prioritize_14 pgagent_14
- pgaudit16_14 pgauditlogtofile_14 pgcryptokey_14 pgexportdoc_14 pgfincore_14 pgimportdoc_14 powa_14 pgmp_14 pgq_14
- pgquarrel-0.7.0-1 pgsql_tweaks_14 pgtap_14 pgtt_14 postgresql-unit_14 postgresql_anonymizer_14 postgresql_faker_14
- safeupdate_14 semver_14 set_user_14 sslutils_14 table_version_14 # pgrouting_14 osm2pgrouting_14
- clang coreutils diffutils rpm-build rpm-devel rpmlint rpmdevtools bison flex # gcc gcc-c++ # - build utils - #
- docker-ce docker-compose kubelet kubectl kubeadm kubernetes-cni helm # - cloud native- #
repo_url_packages: # extra packages from url
- https://github.com/Vonng/loki-rpm/releases/download/v2.5.0/loki-2.5.0.x86_64.rpm
- https://github.com/Vonng/loki-rpm/releases/download/v2.5.0/promtail-2.5.0.x86_64.rpm
- https://github.com/Vonng/pg_exporter/releases/download/v0.5.0/pg_exporter-0.5.0.x86_64.rpm
- https://github.com/cybertec-postgresql/vip-manager/releases/download/v1.0.2/vip-manager-1.0.2-1.x86_64.rpm
- https://github.com/Vonng/haproxy-rpm/releases/download/v2.5.7/haproxy-2.5.7-1.el7.x86_64.rpm
- https://github.com/Vonng/pigsty-pkg/releases/download/misc/redis-6.2.7-1.el7.remi.x86_64.rpm
- https://github.com/dalibo/pev2/releases/download/v0.24.0/pev2.tar.gz
- https://github.com/Vonng/pigsty-pkg/releases/download/misc/polysh-0.4-1.noarch.rpm
#-----------------------------------------------------------------
# NAMESERVER
#-----------------------------------------------------------------
nameserver_enabled: false # setup dnsmasq
dns_records: # dynamic dns record resolved by dnsmasq
- 10.10.10.10 pg-meta # sandbox cluster name for pg-meta (primary node)
- 10.10.10.11 pg-test # sandbox cluster name for pg-test (primary node)
- 10.10.10.10 pg-meta-1 # sandbox instance pg-meta-1
- 10.10.10.11 pg-test-1 # sandbox instance node-1
- 10.10.10.12 pg-test-2 # sandbox instance node-2
- 10.10.10.13 pg-test-3 # sandbox instance node-3
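# once dnsmasq is enabled, these records resolve against the meta node, e.g.
# (illustrative): dig @10.10.10.10 pg-test +short   # -> 10.10.10.11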
#-----------------------------------------------------------------
# PROMETHEUS
#-----------------------------------------------------------------
prometheus_enabled: true # setup prometheus
prometheus_data_dir: /data/prometheus/data
prometheus_options: '--storage.tsdb.retention=15d'
prometheus_reload: false # reload prometheus instead of recreating it?
prometheus_sd_method: static # service discovery method: static|consul
prometheus_sd_interval: 5s # service discovery refresh interval
prometheus_scrape_interval: 10s # global scrape & evaluation interval
prometheus_scrape_timeout: 8s # scrape timeout
#-----------------------------------------------------------------
# EXPORTER
#-----------------------------------------------------------------
exporter_install: none # none|yum|binary, none by default
exporter_repo_url: '' # if set, repo will be added to /etc/yum.repos.d/
exporter_metrics_path: /metrics # default metric path for pg related exporter
#-----------------------------------------------------------------
# GRAFANA
#-----------------------------------------------------------------
grafana_enabled: true # enable grafana? only works on meta nodes
grafana_endpoint: http://10.10.10.10:3000 # grafana endpoint url
grafana_admin_username: admin # default grafana admin username
grafana_admin_password: pigsty # default grafana admin password
grafana_database: sqlite3 # default grafana database type: sqlite3|postgres
grafana_pgurl: postgres://dbuser_grafana:DBUser.Grafana@meta:5436/grafana
grafana_plugin_method: install # none|install|always, none will skip plugin install
grafana_plugin_cache: /www/pigsty/plugins.tgz # path to grafana plugins cache tarball
grafana_plugin_list: # plugins that will be downloaded via grafana-cli
- marcusolsson-csv-datasource
- marcusolsson-json-datasource
- marcusolsson-treemap-panel
grafana_plugin_git: # plugins that will be downloaded via git
- https://github.com/Vonng/vonng-echarts-panel
#-----------------------------------------------------------------
# LOKI
#-----------------------------------------------------------------
loki_enabled: true # enable loki? only works on meta nodes
loki_clean: false # whether remove existing loki data
loki_endpoint: http://10.10.10.10:3100/loki/api/v1/push # where to push data
loki_options: '-config.file=/etc/loki.yml -config.expand-env=true'
loki_data_dir: /data/loki # default loki data dir
loki_retention: 15d # log retention period
#-----------------------------------------------------------------
# DCS
#-----------------------------------------------------------------
dcs_name: pigsty # consul dc name
dcs_servers: # dcs server dict in name:ip format
meta-1: 10.10.10.10 # using an existing external dcs cluster is recommended for HA
meta-2: 10.10.10.11 # nodes with ips in dcs_servers will be initialized as dcs servers
meta-3: 10.10.10.12 # it's recommended to reuse meta nodes as dcs servers if no ad hoc cluster is available
dcs_registry: consul # where to register services: none | consul | etcd | both
dcs_safeguard: false # if true, running dcs will NOT be removed
dcs_clean: true # if true, running dcs will be purged during node init, DANGEROUS
#-----------------------------------------------------------------
# CONSUL
#-----------------------------------------------------------------
consul_enabled: true # enable consul server/agent by default?
consul_data_dir: /data/consul # consul data dir (/data/consul by default)
#-----------------------------------------------------------------
# ETCD
#-----------------------------------------------------------------
etcd_enabled: true # enable etcd server by default?
etcd_data_dir: /data/etcd # etcd data dir (/data/etcd by default)
#================================================================#
# VARS: NODES #
#================================================================#
# global variables for nodes (including meta)
#-----------------------------------------------------------------
# NODE_IDENTITY
#-----------------------------------------------------------------
meta_node: false # node with meta_node flag will be marked as admin node
# nodename: # [OPTIONAL] # node instance identity, used as `ins`, hostname by default
node_cluster: nodes # [OPTIONAL] # node cluster identity, used as `cls`, 'nodes' by default
nodename_overwrite: true # overwrite node's hostname with nodename?
nodename_exchange: false # exchange nodename among play hosts?
#-----------------------------------------------------------------
# NODE_DNS
#-----------------------------------------------------------------
node_etc_hosts_default: # static dns records in /etc/hosts
- 10.10.10.10 meta pigsty p.pigsty g.pigsty a.pigsty c.pigsty l.pigsty
- 10.10.10.10 api.pigsty adm.pigsty cli.pigsty ddl.pigsty lab.pigsty git.pigsty sss.pigsty
node_etc_hosts: [] # extra static dns records in /etc/hosts
node_dns_method: add # add (default) | none (skip) | overwrite (remove old settings)
node_dns_servers: # dynamic nameserver in /etc/resolv.conf
- 10.10.10.10
node_dns_options: # dns resolv options
- options single-request-reopen timeout:1 rotate
- domain service.consul
#-----------------------------------------------------------------
# NODE_REPO
#-----------------------------------------------------------------
node_repo_method: local # none|local: add local repo|public: add upstream directly
node_repo_remove: true # remove existing repo on nodes?
node_repo_local_urls: # list local repo url, if node_repo_method = local
- http://pigsty/pigsty.repo
node_packages: [ ] # extra packages for all nodes
node_packages_default: # common packages for all nodes
- wget,sshpass,ntp,chrony,tuned,uuid,lz4,make,patch,bash,lsof,wget,unzip,git,ftp,vim-minimal,ca-certificates
- numactl,grubby,sysstat,dstat,iotop,bind-utils,net-tools,tcpdump,socat,ipvsadm,telnet,tuned,nc,pv,jq,perf
- readline,zlib,openssl,openssl-libs,openssh-clients,python3,python36-requests,node_exporter,consul,etcd,promtail
node_packages_meta: # extra packages for meta nodes
- grafana,prometheus2,alertmanager,loki,nginx_exporter,blackbox_exporter,pushgateway,redis,postgresql14
- nginx,ansible,pgbadger,python-psycopg2,dnsmasq,coreutils,diffutils,polysh,docker-ce,docker-compose
node_packages_meta_pip: jupyterlab
#-----------------------------------------------------------------
# NODE_TUNE
#-----------------------------------------------------------------
node_disable_firewall: true # disable firewall
node_disable_selinux: true # disable selinux
node_disable_numa: false # disable numa (node reboot required)
node_disable_swap: false # disable swap, use with caution
node_static_network: true # keep dns resolver settings after reboot
node_disk_prefetch: false # setup disk prefetch on HDD to increase performance
node_kernel_modules: [ softdog, br_netfilter, ip_vs, ip_vs_rr, ip_vs_wrr, ip_vs_sh ]
node_tune: tiny # install and activate tuned profile: none|oltp|olap|crit|tiny
node_sysctl_params: { } # set additional sysctl parameters, k:v format
#-----------------------------------------------------------------
# NODE_ADMIN
#-----------------------------------------------------------------
node_data_dir: /data # main data directory
node_admin_enabled: true # create a default admin user defined by `node_admin_*` ?
node_admin_uid: 88 # uid and gid for this admin user
node_admin_username: dba # name of this admin user, dba by default
node_admin_ssh_exchange: true # exchange admin ssh key among each pgsql cluster ?
node_admin_pk_current: true # add current user's ~/.ssh/id_rsa.pub to admin authorized_keys ?
node_admin_pk_list: # ssh public keys to be added to admin user (REPLACE WITH YOURS!)
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC7IMAMNavYtWwzAJajKqwdn3ar5BhvcwCnBTxxEkXhGlCO2vfgosSAQMEflfgvkiI5nM1HIFQ8KINlx1XLO7SdL5KdInG5LIJjAFh0pujS4kNCT9a5IGvSq1BrzGqhbEcwWYdju1ZPYBcJm/MG+JD0dYCh8vfrYB/cYMD0SOmNkQ== vagrant@pigsty.com'
#-----------------------------------------------------------------
# NODE_TIME
#-----------------------------------------------------------------
node_timezone: Asia/Hong_Kong # default node timezone, empty will not change
node_ntp_enabled: true # enable ntp service? false will leave ntp service untouched
node_ntp_service: ntp # ntp service provider: ntp|chrony
node_ntp_servers: # default NTP servers
- pool cn.pool.ntp.org iburst
- pool pool.ntp.org iburst
- pool time.pool.aliyun.com iburst
- server 10.10.10.10 iburst
- server ntp.tuna.tsinghua.edu.cn iburst
node_crontab_overwrite: true # true will overwrite /etc/crontab, false will append to it
node_crontab: [ ] # crontab entries in /etc/crontab
#-----------------------------------------------------------------
# DOCKER
#-----------------------------------------------------------------
docker_enabled: false # enable docker on all nodes? (you can enable it on meta nodes only)
docker_cgroups_driver: systemd # docker cgroup fs driver
docker_registry_mirrors: [] # docker registry mirror
docker_image_cache: /tmp/docker.tgz # docker images tarball to be loaded if exists
#-----------------------------------------------------------------
# NODE_EXPORTER
#-----------------------------------------------------------------
node_exporter_enabled: true # setup node_exporter on instance
node_exporter_port: 9100 # default port for node exporter
node_exporter_options: '--no-collector.softnet --no-collector.nvme --collector.ntp --collector.tcpstat --collector.processes'
#-----------------------------------------------------------------
# PROMTAIL
#-----------------------------------------------------------------
promtail_enabled: true # enable promtail logging collector?
promtail_clean: false # remove promtail status file? false by default
promtail_port: 9080 # default listen address for promtail
promtail_options: '-config.file=/etc/promtail.yml -config.expand-env=true'
promtail_positions: /var/log/positions.yaml # position status for promtail
#================================================================#
# VARS: PGSQL #
#================================================================#
#-----------------------------------------------------------------
# PG_IDENTITY
#-----------------------------------------------------------------
# pg_cluster: # <CLUSTER> <REQUIRED> : pgsql cluster name
# pg_role: replica # <INSTANCE> <REQUIRED> : pg role : primary, replica, offline
# pg_seq: 0 # <INSTANCE> <REQUIRED> : instance seq number
# pg_instances: {} # <INSTANCE> : define multiple pg instances on one node, used by monly & gpsql
# pg_upstream: # <INSTANCE> : replication upstream ip addr
# pg_shard: # <CLUSTER> : pgsql shard name
# pg_sindex: 0 # <CLUSTER> : pgsql shard index
# gp_role: master # <CLUSTER> : gpsql role, master or segment
pg_offline_query: false # <INSTANCE> [FLAG] set to true to enable offline query on this instance (instance level)
pg_backup: false # <INSTANCE> [FLAG] store base backup on this node (instance level, reserved)
pg_weight: 100 # <INSTANCE> [FLAG] default load balance weight (instance level)
pg_hostname: true # [FLAG] reuse postgres identity name as node identity?
pg_preflight_skip: false # [FLAG] skip preflight identity check
#-----------------------------------------------------------------
# PG_BUSINESS
#-----------------------------------------------------------------
# overwrite these variables on <CLUSTER> level
pg_users: [] # business users
pg_databases: [] # business databases
pg_services_extra: [] # extra services
pg_hba_rules_extra: [] # extra hba rules
pgbouncer_hba_rules_extra: [] # extra pgbouncer hba rules
# WARNING: change these in production environment!
pg_admin_username: dbuser_dba
pg_admin_password: DBUser.DBA
pg_monitor_username: dbuser_monitor
pg_monitor_password: DBUser.Monitor
pg_replication_username: replicator
pg_replication_password: DBUser.Replicator
#-----------------------------------------------------------------
# PG_INSTALL
#-----------------------------------------------------------------
pg_dbsu: postgres # os user for database, postgres by default (unwise to change it)
pg_dbsu_uid: 26 # os dbsu uid and gid, 26 for default postgres users and groups
pg_dbsu_sudo: limit # dbsu sudo privilege: none|limit|all|nopass, limit by default
pg_dbsu_home: /var/lib/pgsql # postgresql home directory
pg_dbsu_ssh_exchange: true # exchange postgres dbsu ssh key among same cluster ?
pg_version: 14 # default postgresql version to be installed
pgdg_repo: false # add pgdg official repo before install (in case of no local repo available)
pg_add_repo: false # add postgres-related repos before install?
pg_bin_dir: /usr/pgsql/bin # postgres binary dir, /usr/pgsql/bin by default (the /usr/pgsql -> /usr/pgsql-{ver} symlink)
pg_packages: # postgresql related packages. `${pg_version}` will be replaced by actual `pg_version`
- postgresql${pg_version}* # postgresql kernel packages
- postgis32_${pg_version}* # postgis
- citus_${pg_version}* # citus
- timescaledb-2-postgresql-${pg_version} # timescaledb
- pgbouncer pg_exporter pgbadger pg_activity node_exporter consul haproxy vip-manager
- patroni patroni-consul patroni-etcd python3 python3-psycopg2 python36-requests python3-etcd
- python3-consul python36-urllib3 python36-idna python36-pyOpenSSL python36-cryptography
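# e.g. with pg_version=14, the entries above render to postgresql14*,
# postgis32_14*, citus_14*, timescaledb-2-postgresql-14 (matching repo_packages)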
pg_extensions: # postgresql extensions. `${pg_version}` will be replaced by actual `pg_version`
- pg_repack_${pg_version} pg_qualstats_${pg_version} pg_stat_kcache_${pg_version} pg_stat_monitor_${pg_version} wal2json_${pg_version}
# - ogr_fdw${pg_version} mysql_fdw_${pg_version} redis_fdw_${pg_version} mongo_fdw${pg_version} hdfs_fdw_${pg_version}
# - count_distinct${version} ddlx_${version} geoip${version} orafce${version}
# - hypopg_${version} ip4r${version} jsquery_${version} logerrors_${version} periods_${version} pg_auto_failover_${version} pg_catcheck${version}
# - pg_fkpart${version} pg_jobmon${version} pg_partman${version} pg_prioritize_${version} pg_track_settings${version} pgaudit15_${version}
# - pgcryptokey${version} pgexportdoc${version} pgimportdoc${version} pgmemcache-${version} pgmp${version} pgq-${version} pgquarrel pgrouting_${version}
# - pguint${version} pguri${version} prefix${version} safeupdate_${version} semver${version} table_version${version} tdigest${version}
#-----------------------------------------------------------------
# PG_BOOTSTRAP
#-----------------------------------------------------------------
pg_safeguard: false # true will disable pg_clean entirely, even for pgsql-remove.yml
pg_clean: true # true will clean running postgres during pgsql init (DANGEROUS)
pg_data: /pg/data # postgres data directory (soft link)
pg_fs_main: /data # primary data disk mount point /pg -> {{ pg_fs_main }}/postgres/{{ pg_instance }}
pg_fs_bkup: /data/backups # backup disk mount point /pg/* -> {{ pg_fs_bkup }}/postgres/{{ pg_instance }}/*
pg_dummy_filesize: 64MiB # /pg/dummy holds some disk space for emergency use
pg_listen: '0.0.0.0' # postgres listen address, '0.0.0.0' (all ipv4 addr) by default
pg_port: 5432 # postgres port, 5432 by default
pg_localhost: /var/run/postgresql # localhost unix socket dir for connection
patroni_enabled: true # if not enabled, no postgres cluster will be created
patroni_mode: default # pause|default|remove
pg_dcs_type: consul # which dcs to use: consul or etcd or raft
pg_namespace: /pg # top level key namespace in dcs
patroni_port: 8008 # default patroni port
patroni_watchdog_mode: automatic # watchdog mode: off|automatic|required
pg_conf: tiny.yml # pgsql template: {oltp|olap|crit|tiny}.yml
pg_libs: 'timescaledb, pg_stat_statements, auto_explain' # extensions to be loaded
pg_delay: 0 # apply delay for standby cluster leader
pg_checksum: false # enable data checksum by default
pg_encoding: UTF8 # database cluster encoding, UTF8 by default
pg_locale: C # database cluster locale, C by default
pg_lc_collate: C # database cluster collate, C by default
pg_lc_ctype: en_US.UTF8 # database character type, en_US.UTF8 by default (for i18n full-text search)
pgbouncer_enabled: true # if not enabled, pgbouncer will not be created
pgbouncer_port: 6432 # pgbouncer port, 6432 by default
pgbouncer_poolmode: transaction # pooling mode: session|transaction|statement, transaction pooling by default
pgbouncer_max_db_conn: 100 # max connection to single database, DO NOT set this larger than postgres max conn or db connlimit
#-----------------------------------------------------------------
# PG_PROVISION
#-----------------------------------------------------------------
pg_provision: true # whether to provision postgres cluster contents (users, databases, templates)
pg_init: pg-init # init script for cluster template
pg_default_roles:
- { name: dbrole_readonly , login: false , comment: role for global read-only access } # production read-only role
- { name: dbrole_readwrite , login: false , roles: [dbrole_readonly], comment: role for global read-write access } # production read-write role
- { name: dbrole_offline , login: false , comment: role for restricted read-only access (offline instance) } # restricted-read-only role
- { name: dbrole_admin , login: false , roles: [pg_monitor, dbrole_readwrite] , comment: role for object creation } # production DDL change role
- { name: postgres , superuser: true , comment: system superuser } # system dbsu, name is designated by `pg_dbsu`
- { name: dbuser_dba , superuser: true , roles: [dbrole_admin] , comment: system admin user } # admin dbsu, name is designated by `pg_admin_username`
- { name: replicator , replication: true , bypassrls: true , roles: [pg_monitor, dbrole_readonly] , comment: system replicator } # replicator
- { name: dbuser_monitor , roles: [pg_monitor, dbrole_readonly] , comment: system monitor user , parameters: {log_min_duration_statement: 1000 } } # monitor user
- { name: dbuser_stats , password: DBUser.Stats , roles: [dbrole_offline] , comment: business offline user for offline queries and ETL } # ETL user
pg_default_privileges: # - privileges - #
- GRANT USAGE ON SCHEMAS TO dbrole_readonly
- GRANT SELECT ON TABLES TO dbrole_readonly
- GRANT SELECT ON SEQUENCES TO dbrole_readonly
- GRANT EXECUTE ON FUNCTIONS TO dbrole_readonly
- GRANT USAGE ON SCHEMAS TO dbrole_offline
- GRANT SELECT ON TABLES TO dbrole_offline
- GRANT SELECT ON SEQUENCES TO dbrole_offline
- GRANT EXECUTE ON FUNCTIONS TO dbrole_offline
- GRANT INSERT, UPDATE, DELETE ON TABLES TO dbrole_readwrite
- GRANT USAGE, UPDATE ON SEQUENCES TO dbrole_readwrite
- GRANT TRUNCATE, REFERENCES, TRIGGER ON TABLES TO dbrole_admin
- GRANT CREATE ON SCHEMAS TO dbrole_admin
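# each entry is applied as a default privilege, rendered roughly as (a sketch,
# <owner> stands for the granting role):
#   ALTER DEFAULT PRIVILEGES FOR ROLE <owner> GRANT SELECT ON TABLES TO dbrole_readonly;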
pg_default_schemas: [ monitor ] # default schemas to be created
pg_default_extensions: # default extensions to be created
- { name: 'pg_stat_statements', schema: 'monitor' }
- { name: 'pgstattuple', schema: 'monitor' }
- { name: 'pg_qualstats', schema: 'monitor' }
- { name: 'pg_buffercache', schema: 'monitor' }
- { name: 'pageinspect', schema: 'monitor' }
- { name: 'pg_prewarm', schema: 'monitor' }
- { name: 'pg_visibility', schema: 'monitor' }
- { name: 'pg_freespacemap', schema: 'monitor' }
- { name: 'pg_repack', schema: 'monitor' }
- name: postgres_fdw
- name: file_fdw
- name: btree_gist
- name: btree_gin
- name: pg_trgm
- name: intagg
- name: intarray
pg_reload: true # reload postgres after hba changes
pg_hba_rules: # postgres host-based authentication rules
- title: allow meta node password access
role: common
rules:
- host all all 10.10.10.10/32 md5
- title: allow intranet admin password access
role: common
rules:
- host all +dbrole_admin 10.0.0.0/8 md5
- host all +dbrole_admin 172.16.0.0/12 md5
- host all +dbrole_admin 192.168.0.0/16 md5
- title: allow intranet password access
role: common
rules:
- host all all 10.0.0.0/8 md5
- host all all 172.16.0.0/12 md5
- host all all 192.168.0.0/16 md5
- title: allow local read/write (local production user via pgbouncer)
role: common
rules:
- local all +dbrole_readonly md5
- host all +dbrole_readonly 127.0.0.1/32 md5
- title: allow offline query (ETL,SAGA,Interactive) on offline instance
role: offline
rules:
- host all +dbrole_offline 10.0.0.0/8 md5
- host all +dbrole_offline 172.16.0.0/12 md5
- host all +dbrole_offline 192.168.0.0/16 md5
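# `role: common` rules are rendered on every instance, while `role: offline`
# rules land only on offline instances (pg_role = offline or pg_offline_query),
# e.g. pg-test-3 above; each rule renders as one pg_hba.conf line (a sketch):
#   host  all  +dbrole_offline  10.0.0.0/8  md5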
pgbouncer_hba_rules: # pgbouncer host-based authentication rules
- title: local password access
role: common
rules:
- local all all md5
- host all all 127.0.0.1/32 md5
- title: intranet password access
role: common
rules:
- host all all 10.0.0.0/8 md5
- host all all 172.16.0.0/12 md5
- host all all 192.168.0.0/16 md5
#-----------------------------------------------------------------
# PG_EXPORTER
#-----------------------------------------------------------------
pg_exporter_enabled: true # setup pg_exporter on instance
pg_exporter_config: pg_exporter.yml # use fast cache exporter for demo
pg_exporter_port: 9630 # pg_exporter listen port
pg_exporter_params: 'sslmode=disable' # url query parameters for pg_exporter
pg_exporter_url: '' # optional, overwrite auto-generate postgres connstr
pg_exporter_auto_discovery: true # optional, discover available databases on target instance?
pg_exporter_exclude_database: 'template0,template1,postgres' # optional, comma-separated list of databases that WILL NOT be monitored when auto-discovery is enabled
pg_exporter_include_database: '' # optional, comma-separated list of databases that WILL BE monitored when auto-discovery is enabled; empty string disables include mode
pg_exporter_options: '--log.level=info --log.format="logger:syslog?appname=pg_exporter&local=7"'
pgbouncer_exporter_enabled: true # setup pgbouncer_exporter on instance (if you don't have pgbouncer, disable it)
pgbouncer_exporter_port: 9631 # pgbouncer_exporter listen port
pgbouncer_exporter_url: '' # optional, overwrite auto-generate pgbouncer connstr
pgbouncer_exporter_options: '--log.level=info --log.format="logger:syslog?appname=pgbouncer_exporter&local=7"'
#-----------------------------------------------------------------
# PG_SERVICE
#-----------------------------------------------------------------
pg_services: # how to expose postgres service in cluster?
- name: primary # service name {{ pg_cluster }}-primary
src_ip: "*"
src_port: 5433
dst_port: pgbouncer # 5433 route to pgbouncer
check_url: /primary # primary health check, success when instance is primary
selector: "[]" # select all instance as primary service candidate
- name: replica # service name {{ pg_cluster }}-replica
src_ip: "*"
src_port: 5434
dst_port: pgbouncer
check_url: /read-only # read-only health check. (including primary)
selector: "[]" # select all instance as replica service candidate
selector_backup: "[? pg_role == `primary` || pg_role == `offline` ]"
- name: default # service's actual name is {{ pg_cluster }}-default
src_ip: "*" # service bind ip address, * for all, vip for cluster virtual ip address
src_port: 5436 # bind port, mandatory
dst_port: postgres # target port: postgres|pgbouncer|port_number , pgbouncer(6432) by default
check_method: http # health check method: only http is available for now
check_port: patroni # health check port: patroni|pg_exporter|port_number , patroni by default
check_url: /primary # health check url path, / as default
check_code: 200 # health check http code, 200 as default
selector: "[]" # instance selector
haproxy: # haproxy specific fields
maxconn: 3000 # default front-end connection
balance: roundrobin # load balance algorithm (roundrobin by default)
default_server_options: 'inter 3s fastinter 1s downinter 5s rise 3 fall 3 on-marked-down shutdown-sessions slowstart 30s maxconn 3000 maxqueue 128 weight 100'
- name: offline # service name {{ pg_cluster }}-offline
src_ip: "*"
src_port: 5438
dst_port: postgres
check_url: /replica # offline MUST be a replica
selector: "[? pg_role == `offline` || pg_offline_query ]" # instances with pg_role == 'offline' or instance marked with 'pg_offline_query == true'
selector_backup: "[? pg_role == `replica` && !pg_offline_query]" # replica are used as backup server in offline service
haproxy_enabled: true # enable haproxy on this node?
haproxy_reload: true # reload haproxy after config?
haproxy_auth_enabled: false # enable authentication for haproxy admin?
haproxy_admin_username: admin # default haproxy admin username
haproxy_admin_password: pigsty # default haproxy admin password
haproxy_exporter_port: 9101 # default admin/exporter port
haproxy_client_timeout: 24h # client side connection timeout
haproxy_server_timeout: 24h # server side connection timeout
vip_mode: none # none | l2 | l4 , l4 not implemented yet
vip_reload: true # reload vip after config?
# vip_address: 127.0.0.1 # virtual ip address ip (l2 or l4)
# vip_cidrmask: 24 # virtual ip address cidr mask (l2 only)
# vip_interface: eth0 # virtual ip network interface (l2 only)
# dns_mode: vip # vip|all|selector: how to resolve cluster DNS?
# dns_selector: '[]' # if dns_mode == selector, filter instances to be resolved
#================================================================#
# VARS: REDIS #
#================================================================#
# Redis clusters are not enabled by default
#-----------------------------------------------------------------
# REDIS_IDENTITY
#-----------------------------------------------------------------
#redis_cluster: redis-test # name of this redis cluster @ cluster level
#redis_node: 1 # redis node identifier, integer sequence @ node level
#redis_instances: {} # redis instances definition of this redis node @ node level
#-----------------------------------------------------------------
# REDIS_NODE
#-----------------------------------------------------------------
redis_fs_main: /data # main fs mountpoint for redis data
redis_exporter_enabled: true # install redis exporter on redis nodes?
redis_exporter_port: 9121 # default port for redis exporter
redis_exporter_options: '' # default cli args for redis exporter
#-----------------------------------------------------------------
# REDIS_PROVISION
#-----------------------------------------------------------------
redis_safeguard: false # force redis_clean = abort if true
redis_clean: true # abort|skip|clean if redis server already exists
redis_rmdata: true # remove redis data when purging redis server?
redis_mode: standalone # standalone,cluster,sentinel
redis_conf: redis.conf # config template path (except sentinel)
redis_bind_address: '0.0.0.0' # bind address, empty string falls back to inventory_hostname
redis_max_memory: 1GB # max memory used by each redis instance
redis_mem_policy: allkeys-lru # memory eviction policy
redis_password: '' # masterauth & requirepass password, disable by empty string
redis_rdb_save: ['1200 1'] # redis rdb save directives, disable with empty list
redis_aof_enabled: false # redis aof enabled
redis_rename_commands: {} # rename dangerous commands
redis_cluster_replicas: 1 # how many replicas for a master in redis cluster ?
...