43-node: prod

Production environment simulation with 43 nodes

Overview

  • Conf Name: prod
  • Node Count: 43 (vagrant spec: pigsty/vagrant/spec/prod.rb)
  • Description: Production environment simulation with 43 nodes
  • OS Distro: el8, el9, d12, u22, u24
  • OS Arch: x86_64
To use this template, copy it into the Pigsty home directory as the active configuration:

cp -f conf/prod.yml pigsty.yml

Note: after generating the configuration, you may need to adjust the node IP addresses to match your own environment (optional).
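Alternatively, starting from a fresh Pigsty checkout, the configure script can generate pigsty.yml from this template and substitute the admin IP for you. The following is only a sketch of the standard workflow (the -c flag and the install.yml playbook are assumed to behave as in a stock Pigsty installation):

cd ~/pigsty
./configure -c prod     # generate pigsty.yml from conf/prod.yml
vi pigsty.yml           # review and adjust IP addresses, passwords, etc.
./install.yml           # apply the configuration with the install playbook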


Content

Source file: pigsty/conf/prod.yml

all:

  children:

    #==========================================================#
    # infra: 2 nodes
    #==========================================================#
    # ./infra.yml -l infra
    # ./docker.yml -l infra (optional)
    infra:
      hosts:
        10.10.10.10: { infra_seq: 1 ,nodename: infra-1 }
        10.10.10.11: { infra_seq: 2 ,nodename: infra-2 }
      vars:
        node_id_from_pg: false
        docker_enabled: true
        node_cluster: infra
        node_conf: oltp
        pg_exporters:           # bin/pgmon-add pg-meta2/pg-test2/pg-src2/pg-dst2
          20001: {pg_cluster: pg-meta2   ,pg_seq: 1 ,pg_host: 10.10.10.10, pg_databases: [{ name: meta }]}
          20002: {pg_cluster: pg-meta2   ,pg_seq: 2 ,pg_host: 10.10.10.11, pg_databases: [{ name: meta }]}

          20003: {pg_cluster: pg-test2   ,pg_seq: 1 ,pg_host: 10.10.10.41, pg_databases: [{ name: test }]}
          20004: {pg_cluster: pg-test2   ,pg_seq: 2 ,pg_host: 10.10.10.42, pg_databases: [{ name: test }]}
          20005: {pg_cluster: pg-test2   ,pg_seq: 3 ,pg_host: 10.10.10.43, pg_databases: [{ name: test }]}
          20006: {pg_cluster: pg-test2   ,pg_seq: 4 ,pg_host: 10.10.10.44, pg_databases: [{ name: test }]}

          20007: {pg_cluster: pg-src2    ,pg_seq: 1 ,pg_host: 10.10.10.45, pg_databases: [{ name: src }]}
          20008: {pg_cluster: pg-src2    ,pg_seq: 2 ,pg_host: 10.10.10.46, pg_databases: [{ name: src }]}
          20009: {pg_cluster: pg-src2    ,pg_seq: 3 ,pg_host: 10.10.10.47, pg_databases: [{ name: src }]}

          20010: {pg_cluster: pg-dst2    ,pg_seq: 3 ,pg_host: 10.10.10.48, pg_databases: [{ name: dst }]}
          20011: {pg_cluster: pg-dst2    ,pg_seq: 4 ,pg_host: 10.10.10.49, pg_databases: [{ name: dst }]}


    #==========================================================#
    # nodes: 43 nodes
    #==========================================================#
    # ./node.yml
    nodes:
      hosts:
        10.10.10.10 : { nodename: meta-1        ,cpu: 8 ,mem: 32768 }
        10.10.10.11 : { nodename: meta-2        ,cpu: 8 ,mem: 32768 }
        10.10.10.12 : { nodename: pg-v12-1      ,cpu: 2 ,mem: 4096  }
        10.10.10.13 : { nodename: pg-v13-1      ,cpu: 2 ,mem: 4096  }
        10.10.10.14 : { nodename: pg-v14-1      ,cpu: 2 ,mem: 4096  }
        10.10.10.15 : { nodename: pg-v15-1      ,cpu: 2 ,mem: 4096  }
        10.10.10.16 : { nodename: pg-v16-1      ,cpu: 2 ,mem: 4096  }
        10.10.10.21 : { nodename: etcd-1        ,cpu: 2 ,mem: 4096  }
        10.10.10.22 : { nodename: etcd-2        ,cpu: 2 ,mem: 4096  }
        10.10.10.23 : { nodename: etcd-3        ,cpu: 2 ,mem: 4096  }
        10.10.10.24 : { nodename: etcd-4        ,cpu: 2 ,mem: 4096  }
        10.10.10.25 : { nodename: etcd-5        ,cpu: 2 ,mem: 4096  }
        10.10.10.26 : { nodename: minio-1       ,cpu: 2 ,mem: 4096  }
        10.10.10.27 : { nodename: minio-2       ,cpu: 2 ,mem: 4096  }
        10.10.10.28 : { nodename: minio-3       ,cpu: 2 ,mem: 4096  }
        10.10.10.29 : { nodename: proxy-1       ,cpu: 2 ,mem: 4096  }
        10.10.10.30 : { nodename: proxy-2       ,cpu: 2 ,mem: 4096  }
        10.10.10.41 : { nodename: pg-test-1     ,cpu: 1 ,mem: 2048  }
        10.10.10.42 : { nodename: pg-test-2     ,cpu: 1 ,mem: 2048  }
        10.10.10.43 : { nodename: pg-test-3     ,cpu: 1 ,mem: 2048  }
        10.10.10.44 : { nodename: pg-test-4     ,cpu: 1 ,mem: 2048  }
        10.10.10.45 : { nodename: pg-src-1      ,cpu: 1 ,mem: 2048  }
        10.10.10.46 : { nodename: pg-src-2      ,cpu: 1 ,mem: 2048  }
        10.10.10.47 : { nodename: pg-src-3      ,cpu: 1 ,mem: 2048  }
        10.10.10.48 : { nodename: pg-dst-1      ,cpu: 1 ,mem: 2048  }
        10.10.10.49 : { nodename: pg-dst-2      ,cpu: 1 ,mem: 2048  }
        10.10.10.50 : { nodename: pg-citus0-1   ,cpu: 1 ,mem: 2048  }
        10.10.10.51 : { nodename: pg-citus0-2   ,cpu: 1 ,mem: 2048  }
        10.10.10.52 : { nodename: pg-citus1-1   ,cpu: 1 ,mem: 2048  }
        10.10.10.53 : { nodename: pg-citus1-2   ,cpu: 1 ,mem: 2048  }
        10.10.10.54 : { nodename: pg-citus2-1   ,cpu: 1 ,mem: 2048  }
        10.10.10.55 : { nodename: pg-citus2-2   ,cpu: 1 ,mem: 2048  }
        10.10.10.56 : { nodename: pg-citus3-1   ,cpu: 1 ,mem: 2048  }
        10.10.10.57 : { nodename: pg-citus3-2   ,cpu: 1 ,mem: 2048  }
        10.10.10.58 : { nodename: pg-citus4-1   ,cpu: 1 ,mem: 2048  }
        10.10.10.59 : { nodename: pg-citus4-2   ,cpu: 1 ,mem: 2048  }
        10.10.10.81 : { nodename: redis-test-1  ,cpu: 1 ,mem: 2048  }
        10.10.10.82 : { nodename: redis-test-2  ,cpu: 1 ,mem: 2048  }
        10.10.10.83 : { nodename: redis-test-3  ,cpu: 1 ,mem: 2048  }
        10.10.10.84 : { nodename: redis-test-4  ,cpu: 1 ,mem: 2048  }
        10.10.10.85 : { nodename: redis-ms-1    ,cpu: 1 ,mem: 2048  }
        10.10.10.86 : { nodename: redis-ms-2    ,cpu: 1 ,mem: 2048  }
        10.10.10.88 : { nodename: test          ,cpu: 4 ,mem: 8192  }

    #==========================================================#
    # pg-meta: reuse infra node as meta cmdb
    #==========================================================#
    # ./pgsql.yml -l pg-meta
    pg-meta:
      hosts:
        10.10.10.10: { pg_seq: 1 , pg_role: primary }
        10.10.10.11: { pg_seq: 2 , pg_role: replica }
      vars:
        pg_cluster: pg-meta
        pg_vip_enabled: true
        pg_vip_address: 10.10.10.2/24
        pg_vip_interface: eth1
        pg_users:
          - {name: dbuser_meta     ,password: DBUser.Meta     ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: pigsty admin user }
          - {name: dbuser_view     ,password: DBUser.Viewer   ,pgbouncer: true ,roles: [dbrole_readonly] ,comment: read-only viewer for meta database }
          - {name: dbuser_grafana  ,password: DBUser.Grafana  ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for grafana database    }
          - {name: dbuser_bytebase ,password: DBUser.Bytebase ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for bytebase database   }
          - {name: dbuser_kong     ,password: DBUser.Kong     ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for kong api gateway    }
          - {name: dbuser_gitea    ,password: DBUser.Gitea    ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for gitea service       }
          - {name: dbuser_wiki     ,password: DBUser.Wiki     ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for wiki.js service     }
          - {name: dbuser_noco     ,password: DBUser.Noco     ,pgbouncer: true ,roles: [dbrole_admin]    ,comment: admin user for nocodb service      }
        pg_databases:
          - { name: meta ,baseline: cmdb.sql ,comment: pigsty meta database ,schemas: [pigsty] ,extensions: [{name: vector}]}
          - { name: grafana  ,owner: dbuser_grafana  ,revokeconn: true ,comment: grafana primary database }
          - { name: bytebase ,owner: dbuser_bytebase ,revokeconn: true ,comment: bytebase primary database }
          - { name: kong     ,owner: dbuser_kong     ,revokeconn: true ,comment: kong the api gateway database }
          - { name: gitea    ,owner: dbuser_gitea    ,revokeconn: true ,comment: gitea meta database }
          - { name: wiki     ,owner: dbuser_wiki     ,revokeconn: true ,comment: wiki meta database }
          - { name: noco     ,owner: dbuser_noco     ,revokeconn: true ,comment: nocodb database }
        pg_hba_rules:
          - { user: dbuser_view , db: all ,addr: infra ,auth: pwd ,title: 'allow grafana dashboard access cmdb from infra nodes' }
        pg_libs: 'pg_stat_statements, auto_explain' # add timescaledb to shared_preload_libraries
        node_crontab:  # full backup at 1am on Monday, incremental backups on the other days
          - '00 01 * * 1 postgres /pg/bin/pg-backup full'
          - '00 01 * * 2,3,4,5,6,7 postgres /pg/bin/pg-backup'

    #==========================================================#
    # pg-v12 - v16 (no longer supported in the OSS version since 2.6)
    #==========================================================#
    # ./pgsql.yml -l pg-v*
    pg-v12:
      hosts: { 10.10.10.12: { pg_seq: 1 ,pg_role: primary } }
      vars:
        pg_cluster: pg-v12
        #pg_version: 12
        pg_extensions: []
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: v12 } ]
        pg_service_provider: proxy       # use load balancer on group `proxy` with port 10012
        pg_default_services:  [{ name: primary ,port: 10012 ,dest: postgres  ,check: /primary   ,selector: "[]" }]
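        # ILLUSTRATION (assumption): with pg_service_provider set to `proxy`, this cluster's primary
        # service is exposed on the dedicated haproxy nodes instead of locally, so a client could
        # reach pg-v12 through the proxy VIP defined below (10.10.10.99), for example:
        #   psql postgres://test:test@10.10.10.99:10012/v12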

    pg-v13:
      hosts: { 10.10.10.13: { pg_seq: 1 ,pg_role: primary } }
      vars:
        pg_cluster: pg-v13
        #pg_version: 13
        pg_extensions: []
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: v13 } ]
        pg_service_provider: proxy       # use load balancer on group `proxy` with port 10013
        pg_default_services:  [{ name: primary ,port: 10013 ,dest: postgres  ,check: /primary   ,selector: "[]" }]

    pg-v14:
      hosts: { 10.10.10.14: { pg_seq: 1 ,pg_role: primary } }
      vars:
        pg_cluster: pg-v14
        #pg_version: 14
        pg_extensions: []
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: v14 } ]
        pg_service_provider: proxy       # use load balancer on group `proxy` with port 10014
        pg_default_services:  [{ name: primary ,port: 10014 ,dest: postgres  ,check: /primary   ,selector: "[]" }]

    pg-v15:
      hosts: { 10.10.10.15: { pg_seq: 1 ,pg_role: primary } }
      vars:
        pg_cluster: pg-v15
        #pg_version: 15
        pg_extensions: []
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: v15 } ]
        pg_service_provider: proxy       # use load balancer on group `proxy` with port 10015
        pg_default_services:  [{ name: primary ,port: 10015 ,dest: postgres  ,check: /primary   ,selector: "[]" }]

    pg-v16: # beware that PostgreSQL 16 is not available on el7
      hosts: { 10.10.10.16: { pg_seq: 1 ,pg_role: primary } }
      vars:
        pg_cluster: pg-v16
        #pg_version: 16
        pg_extensions: []
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: v16 } ]
        pg_service_provider: proxy       # use load balancer on group `proxy` with port 10016
        pg_default_services:  [{ name: primary ,port: 10016 ,dest: postgres  ,check: /primary   ,selector: "[]" }]


    #==========================================================#
    # etcd: 5 nodes used as dedicated etcd cluster
    #==========================================================#
    # ./etcd.yml -l etcd;
    etcd:  # the 5 node etcd dcs cluster
      hosts:
        10.10.10.21: { etcd_seq: 1 , nodename: etcd-1 }
        10.10.10.22: { etcd_seq: 2 , nodename: etcd-2 }
        10.10.10.23: { etcd_seq: 3 , nodename: etcd-3 }
        10.10.10.24: { etcd_seq: 4 , nodename: etcd-4 }
        10.10.10.25: { etcd_seq: 5 , nodename: etcd-5 }
      vars:
        etcd_cluster: etcd
        node_cluster: etcd
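        # ILLUSTRATION (assumption): cluster health can be checked from any etcd node with etcdctl,
        # provided the ETCDCTL_* environment variables (endpoints / TLS certs) have been set up by
        # the etcd playbook, for example:
        #   etcdctl endpoint health
        #   etcdctl member list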


    #==========================================================#
    # minio: 3 nodes used as dedicated minio cluster
    #==========================================================#
    # ./minio.yml -l minio;
    minio:   # access service via sss.pigsty:9002
      hosts:
        10.10.10.26: { minio_seq: 1 , nodename: minio-1 }
        10.10.10.27: { minio_seq: 2 , nodename: minio-2 }
        10.10.10.28: { minio_seq: 3 , nodename: minio-3 }
      vars:
        minio_cluster: minio
        node_cluster: minio
        minio_data: '/data'                 # it should be a raw device for a multi-node deployment!
        minio_node: '${minio_cluster}-${minio_seq}.pigsty' # minio node name pattern

        vip_enabled: true
        vip_vrid: 127
        vip_address: 10.10.10.100
        vip_interface: eth1

        haproxy_services:                   # expose minio service via haproxy node port 9002
          - name: minio                     # [REQUIRED] service name, unique
            port: 9002                      # [REQUIRED] service port, unique
            options:                        # [OPTIONAL] minio health check
              - option httpchk
              - option http-keep-alive
              - http-check send meth OPTIONS uri /minio/health/live
              - http-check expect status 200
            servers:
              - { name: minio-1 ,ip: 10.10.10.26 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
              - { name: minio-2 ,ip: 10.10.10.27 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
              - { name: minio-3 ,ip: 10.10.10.28 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
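        # ILLUSTRATION (assumption): the load-balanced endpoint can be smoke-tested with the MinIO
        # client (`mcli`, installed on the `test` node below), substituting your real access keys:
        #   mcli alias set sss https://sss.pigsty:9002 <access_key> <secret_key>
        #   mcli ls sss/pgsql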


    #==========================================================#
    # proxy: 2 nodes used as dedicated haproxy server
    #==========================================================#
    # ./node.yml -l proxy
    proxy:
      hosts:
        10.10.10.29: { nodename: proxy-1 }
        10.10.10.30: { nodename: proxy-2 }
      vars:
        node_cluster: proxy
        vip_enabled: true
        vip_vrid: 128
        vip_address: 10.10.10.99
        vip_interface: eth1


    #==========================================================#
    # pg-test: dedicated 4-node testing cluster
    #==========================================================#
    # ./pgsql.yml -l pg-test
    pg-test:
      hosts:
        10.10.10.41: { pg_seq: 1 ,pg_role: primary }
        10.10.10.42: { pg_seq: 2 ,pg_role: replica }
        10.10.10.43: { pg_seq: 3 ,pg_role: replica }
        10.10.10.44: { pg_seq: 4 ,pg_role: replica }
      vars:
        pg_cluster: pg-test
        pg_vip_enabled: true
        pg_vip_address: 10.10.10.3/24
        pg_vip_interface: eth1
        pg_users:  [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
        pg_databases: [{ name: test }]
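        # ILLUSTRATION (assumption): with the cluster VIP enabled, read-write traffic can follow the
        # primary through the VIP and pigsty's default primary service port (5433), for example:
        #   psql postgres://test:test@10.10.10.3:5433/test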


    #==========================================================#
    # pg-src: dedicated 3-node testing cluster
    #==========================================================#
    # ./pgsql.yml -l pg-src
    pg-src:
      hosts:
        10.10.10.45: { pg_seq: 1 ,pg_role: primary }
        10.10.10.46: { pg_seq: 2 ,pg_role: replica }
        10.10.10.47: { pg_seq: 3 ,pg_role: replica }
      vars:
        pg_cluster: pg-src
        #pg_version: 14
        pg_vip_enabled: true
        pg_vip_address: 10.10.10.4/24
        pg_vip_interface: eth1
        pg_users:  [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
        pg_databases: [{ name: src }]


    #==========================================================#
    # pg-dst: dedicated 2-node testing cluster
    #==========================================================#
    # ./pgsql.yml -l pg-dst
    pg-dst:
      hosts:
        10.10.10.48: { pg_seq: 1 ,pg_role: primary } # 8C 8G
        10.10.10.49: { pg_seq: 2 ,pg_role: replica } # 1C 2G
      vars:
        pg_cluster: pg-dst
        pg_vip_enabled: true
        pg_vip_address: 10.10.10.5/24
        pg_vip_interface: eth1
        node_hugepage_ratio: 0.3
        pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: dst } ]


    #==========================================================#
    # pg-citus: 10-node citus cluster (5 primary-replica pairs)
    #==========================================================#
    pg-citus: # citus group
      hosts:
        10.10.10.50: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 0, pg_role: primary }
        10.10.10.51: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 1, pg_role: replica }
        10.10.10.52: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 0, pg_role: primary }
        10.10.10.53: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 1, pg_role: replica }
        10.10.10.54: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 0, pg_role: primary }
        10.10.10.55: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 1, pg_role: replica }
        10.10.10.56: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 0, pg_role: primary }
        10.10.10.57: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 1, pg_role: replica }
        10.10.10.58: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 0, pg_role: primary }
        10.10.10.59: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 1, pg_role: replica }
      vars:
        pg_mode: citus                    # pgsql cluster mode: citus
        pg_shard: pg-citus                # citus shard name: pg-citus
        pg_primary_db: test               # primary database used by citus
        pg_dbsu_password: DBUser.Postgres # all dbsu password access for citus cluster
        pg_vip_enabled: true
        pg_vip_interface: eth1
        pg_extensions: [ 'citus postgis pgvector' ]
        pg_libs: 'citus, timescaledb, pg_stat_statements, auto_explain' # citus will be added by patroni automatically
        pg_users: [ { name: test ,password: test ,pgbouncer: true ,roles: [ dbrole_admin ] } ]
        pg_databases: [ { name: test ,owner: test ,extensions: [ { name: citus }, { name: vector } ] } ]
        pg_hba_rules:
          - { user: 'all' ,db: all  ,addr: 10.10.10.0/24 ,auth: trust ,title: 'trust citus cluster members'        }
          - { user: 'all' ,db: all  ,addr: 127.0.0.1/32  ,auth: ssl   ,title: 'all user ssl access from localhost' }
          - { user: 'all' ,db: all  ,addr: intra         ,auth: ssl   ,title: 'all user ssl access from intranet'  }
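        # ILLUSTRATION (assumption): DDL for a citus cluster is normally issued on the coordinator
        # group (pg-citus0 here); a sketch of connecting via its VIP and distributing a table:
        #   psql postgres://test:test@10.10.10.60:5432/test -c 'CREATE TABLE t (id bigint PRIMARY KEY)'
        #   psql postgres://test:test@10.10.10.60:5432/test -c "SELECT create_distributed_table('t', 'id')"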

    #==========================================================#
    # redis-meta: reuse the 5 etcd nodes as redis sentinel
    #==========================================================#
    # ./redis.yml -l redis-meta
    redis-meta:
      hosts:
        10.10.10.21: { redis_node: 1 , redis_instances: { 26379: {} } }
        10.10.10.22: { redis_node: 2 , redis_instances: { 26379: {} } }
        10.10.10.23: { redis_node: 3 , redis_instances: { 26379: {} } }
        10.10.10.24: { redis_node: 4 , redis_instances: { 26379: {} } }
        10.10.10.25: { redis_node: 5 , redis_instances: { 26379: {} } }
      vars:
        redis_cluster: redis-meta
        redis_password: 'redis.meta'
        redis_mode: sentinel
        redis_max_memory: 256MB
        redis_sentinel_monitor:  # primary list for redis sentinel, use cls as name, primary ip:port
          - { name: redis-src, host: 10.10.10.45, port: 6379 ,password: redis.src, quorum: 1 }
          - { name: redis-dst, host: 10.10.10.48, port: 6379 ,password: redis.dst, quorum: 1 }
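        # ILLUSTRATION (assumption): sentinel state can be inspected with redis-cli against any of
        # the sentinel instances above, for example:
        #   redis-cli -h 10.10.10.21 -p 26379 -a redis.meta sentinel master redis-src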


    #==========================================================#
    # redis-src: reuse pg-src 3 nodes for redis
    #==========================================================#
    # ./redis.yml -l redis-src
    redis-src:
      hosts:
        10.10.10.45: { redis_node: 1 , redis_instances: {6379: {  } }}
        10.10.10.46: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.46 6379' } }}
        10.10.10.47: { redis_node: 3 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.47 6379' } }}
      vars:
        redis_cluster: redis-src
        redis_password: 'redis.src'
        redis_max_memory: 64MB

    #==========================================================#
    # redis-dst: reuse pg-dst 2 nodes for redis
    #==========================================================#
    # ./redis.yml -l redis-dst
    redis-dst:
      hosts:
        10.10.10.48: { redis_node: 1 , redis_instances: {6379: {  }                               }}
        10.10.10.49: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.48 6379' } }}
      vars:
        redis_cluster: redis-dst
        redis_password: 'redis.dst'
        redis_max_memory: 64MB

    #==========================================================#
    # redis-test: redis native cluster on 6 nodes, 18 instances
    #==========================================================#
    # ./node.yml -l redis-test; ./redis.yml -l redis-test
    redis-test:
      hosts:
        10.10.10.81: { redis_node: 1 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
        10.10.10.82: { redis_node: 2 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
        10.10.10.83: { redis_node: 3 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
        10.10.10.84: { redis_node: 4 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
        10.10.10.85: { redis_node: 5 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
        10.10.10.86: { redis_node: 6 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
      vars:
        redis_cluster: redis-test
        redis_password: 'redis.test'
        redis_mode: cluster
        redis_max_memory: 512MB
        node_cluster: redis-test


    #==========================================================#
    # ferret: reuse pg-src as mongo (ferretdb)
    #==========================================================#
    # ./mongo.yml -l ferret
    ferret:
      hosts:
        10.10.10.45: { mongo_seq: 1 }
        10.10.10.46: { mongo_seq: 2 }
        10.10.10.47: { mongo_seq: 3 }
      vars:
        mongo_cluster: ferret
        mongo_pgurl: 'postgres://test:test@10.10.10.45:5432/src'
        #mongo_pgurl: 'postgres://test:[email protected]:5436/test'
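        # ILLUSTRATION (assumption): FerretDB speaks the MongoDB wire protocol on its default port
        # (27017), so the cluster can be reached with mongosh using the postgres credentials above:
        #   mongosh mongodb://test:test@10.10.10.45:27017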


    #==========================================================#
    # test: node for running cli tools and testing miscellaneous stuff
    #==========================================================#
    test:
      hosts: { 10.10.10.88: { nodename: test } }
      vars:
        node_cluster: test
        node_packages: [ 'etcd,logcli,mcli,redis' ]


  #============================================================#
  # Global Variables
  #============================================================#
  vars:

    #==========================================================#
    # INFRA
    #==========================================================#
    version: v3.0.4                   # pigsty version string
    admin_ip: 10.10.10.10             # admin node ip address
    region: default                   # upstream mirror region: default|china|europe
    infra_portal:                     # domain names and upstream servers
      home         : { domain: h.pigsty }
      grafana      : { domain: g.pigsty ,endpoint: "10.10.10.10:3000" , websocket: true }
      prometheus   : { domain: p.pigsty ,endpoint: "10.10.10.10:9090" }
      alertmanager : { domain: a.pigsty ,endpoint: "10.10.10.10:9093" }
      blackbox     : { endpoint: "10.10.10.10:9115" }
      loki         : { endpoint: "10.10.10.10:3100" }
      minio        : { domain: sss.pigsty  ,endpoint: "10.10.10.26:9001" ,scheme: https ,websocket: true }
      postgrest    : { domain: api.pigsty  ,endpoint: "127.0.0.1:8884" }
      pgadmin      : { domain: adm.pigsty  ,endpoint: "127.0.0.1:8885" }
      pgweb        : { domain: cli.pigsty  ,endpoint: "127.0.0.1:8886" }
      bytebase     : { domain: ddl.pigsty  ,endpoint: "127.0.0.1:8887" }
      jupyter      : { domain: lab.pigsty  ,endpoint: "127.0.0.1:8888"  , websocket: true }
      supa         : { domain: supa.pigsty ,endpoint: "10.10.10.10:8000", websocket: true }
    nginx_navbar: []
    dns_records:                      # dynamic dns records resolved by dnsmasq
      - 10.10.10.1 h.pigsty a.pigsty p.pigsty g.pigsty

    #==========================================================#
    # NODE
    #==========================================================#
    node_conf: tiny                   # use small vm tune template
    node_timezone: Asia/Hong_Kong     # use Asia/Hong_Kong Timezone
    node_dns_servers:                 # DNS servers in /etc/resolv.conf
      - 10.10.10.10
      - 10.10.10.11
    node_etc_hosts:
      - 10.10.10.10 h.pigsty a.pigsty p.pigsty g.pigsty
      - 10.10.10.26 sss.pigsty
    node_ntp_servers:                 # NTP servers in /etc/chrony.conf
      - pool cn.pool.ntp.org iburst
      - pool 10.10.10.10 iburst
    node_admin_ssh_exchange: false    # exchange admin ssh key among node cluster

    #==========================================================#
    # PGSQL
    #==========================================================#
    pg_conf: tiny.yml
    pgbackrest_method: minio          # USE THE HA MINIO THROUGH A LOAD BALANCER
    pg_dbsu_ssh_exchange: false       # do not exchange dbsu ssh key among pgsql cluster
    pgbackrest_repo:                  # pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
      local:                          # default pgbackrest repo with local posix fs
        path: /pg/backup              # local backup directory, `/pg/backup` by default
        retention_full_type: count    # retention full backups by count
        retention_full: 2             # keep 2, at most 3 full backup when using local fs repo
      minio:
        type: s3
        s3_endpoint: sss.pigsty       # s3_endpoint could be the load balancer address or a domain name resolving to any of the 3 minio nodes
        s3_region: us-east-1          # the external domain name sss.pigsty resolves to any member (`minio_domain`)
        s3_bucket: pgsql              # instance & node names can also be used: minio-1.pigsty, minio-2.pigsty, minio-3.pigsty
        s3_key: pgbackrest            # better to use a dedicated password for the MinIO pgbackrest user
        s3_key_secret: S3User.Backup
        s3_uri_style: path
        path: /pgbackrest
        storage_port: 9002            # Use the load balancer port 9002 instead of default 9000 (direct access)
        storage_ca_file: /etc/pki/ca.crt
        bundle: y
        cipher_type: aes-256-cbc      # better to use a dedicated cipher password in your production environment
        cipher_pass: pgBackRest.${pg_cluster}
        retention_full_type: time
        retention_full: 14
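    # ILLUSTRATION (assumption): with pgbackrest_method set to `minio`, backups go to the HA MinIO
    # repo defined above; the cron jobs on pg-meta use pigsty's wrapper script, which can also be
    # run by hand on a cluster primary (stanza name assumed to match pg_cluster):
    #   /pg/bin/pg-backup full            # take a full backup now
    #   pgbackrest --stanza=pg-meta info  # inspect the backup repository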

...

Caveat

