# complete-postgres-manifest.yaml (forked from zalando/postgres-operator)
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-test-cluster
# labels:
# environment: demo
# annotations:
# "acid.zalan.do/controller": "second-operator"
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
teamId: "acid"
numberOfInstances: 2
users: # Application/Robot users
zalando:
- superuser
- createdb
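# more users can be declared the same way; a hypothetical plain login role
# (not part of the upstream example) would take an empty flag list:
# app_user: []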
enableMasterLoadBalancer: false
enableReplicaLoadBalancer: false
enableConnectionPooler: false # enable/disable connection pooler deployment
enableReplicaConnectionPooler: false # set to enable connectionPooler for replica service
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32
databases:
foo: zalando
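# each entry maps a database name to its owner role, i.e. database "foo" is owned by role "zalando"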
preparedDatabases:
bar:
defaultUsers: true
extensions:
pg_partman: public
pgcrypto: public
schemas:
data: {}
history:
defaultRoles: true
defaultUsers: false
postgresql:
version: "13"
parameters: # Expert section
shared_buffers: "32MB"
max_connections: "10"
log_statement: "all"
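# other Postgres settings can be added here in the same key/value form,
# e.g. (illustrative values, not part of the upstream example):
# work_mem: "4MB"
# max_wal_size: "1GB"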
volume:
size: 1Gi
# storageClass: my-sc
# iops: 1000 # for EBS gp3
# throughput: 250 # in MB/s for EBS gp3
additionalVolumes:
- name: empty
mountPath: /opt/empty
targetContainers:
- all
volumeSource:
emptyDir: {}
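# the special target "all" mounts the volume into every container of the pod, including sidecars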
# - name: data
# mountPath: /home/postgres/pgdata/partitions
# targetContainers:
# - postgres
# volumeSource:
# persistentVolumeClaim:
# claimName: pvc-postgresql-data-partitions
# readOnly: false
# - name: conf
# mountPath: /etc/telegraf
# subPath: telegraf.conf
# targetContainers:
# - telegraf-sidecar
# volumeSource:
# configMap:
# name: my-config-map
enableShmVolume: true
# spiloRunAsUser: 101
# spiloRunAsGroup: 103
# spiloFSGroup: 103
# podAnnotations:
# annotation.key: value
# serviceAnnotations:
# annotation.key: value
# podPriorityClassName: "spilo-pod-priority"
# tolerations:
# - key: postgres
# operator: Exists
# effect: NoSchedule
resources:
requests:
cpu: 10m
memory: 100Mi
limits:
cpu: 500m
memory: 500Mi
patroni:
initdb:
encoding: "UTF8"
locale: "en_US.UTF-8"
data-checksums: "true"
# pg_hba:
# - hostssl all all 0.0.0.0/0 md5
# - host all all 0.0.0.0/0 md5
# slots:
# permanent_physical_1:
# type: physical
# permanent_logical_1:
# type: logical
# database: foo
# plugin: pgoutput
ttl: 30
loop_wait: &loop_wait 10
retry_timeout: 10
synchronous_mode: false
synchronous_mode_strict: false
maximum_lag_on_failover: 33554432
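# 33554432 bytes = 32 MiB; replicas lagging by more than this are not considered for failover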
# restore a Postgres DB with point-in-time-recovery
# with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp
# with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup
# clone:
# uid: "efd12e58-5786-11e8-b5a7-06148230260c"
# cluster: "acid-batman"
# timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
# s3_wal_path: "s3://custom/path/to/bucket"
# run periodic backups with k8s cron jobs
# enableLogicalBackup: true
# logicalBackupSchedule: "30 00 * * *"
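# the schedule uses standard cron syntax (minute hour day-of-month month day-of-week);
# "30 00 * * *" runs the backup job daily at 00:30 (cluster time, usually UTC)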
# maintenanceWindows:
# - 01:00-06:00 # UTC
# - Sat:00:00-04:00
# overwrite custom properties for connection pooler deployments
# connectionPooler:
# numberOfInstances: 2
# mode: "transaction"
# schema: "pooler"
# user: "pooler"
# resources:
# requests:
# cpu: 300m
# memory: 100Mi
# limits:
# cpu: "1"
# memory: 100Mi
initContainers:
- name: date
image: busybox
command: [ "/bin/date" ]
# sidecars:
# - name: "telegraf-sidecar"
# image: "telegraf:latest"
# resources:
# limits:
# cpu: 500m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 100Mi
# env:
# - name: "USEFUL_VAR"
# value: "perhaps-true"
# Custom TLS certificate. Disabled unless tls.secretName has a value.
tls:
secretName: "" # should correspond to a Kubernetes Secret resource to load
certificateFile: "tls.crt"
privateKeyFile: "tls.key"
caFile: "" # optionally configure Postgres with a CA certificate
caSecretName: "" # optionally the ca.crt can come from this secret instead.
# file names can also be given as absolute paths, in which case they are no longer
# relative to "/tls/" (where the secret is mounted by default) or "/tlsca/"
# (where the caSecret is mounted by default).
# When TLS is enabled, also set the spiloFSGroup parameter above to the relevant value;
# if unknown, set it to 103, the usual value in the default Spilo images.
# On OpenShift there is no need to set spiloFSGroup/spilo_fsgroup.
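# a filled-in example (secret names are illustrative, assuming the secrets
# already exist in the cluster's namespace):
# secretName: "acid-test-cluster-tls"
# caSecretName: "acid-test-cluster-ca"
# caFile: "ca.crt"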
# Enable node affinity by scheduling postgres pods only on nodes that carry
# the label "postgres-operator: enabled", as in the example below.
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: postgres-operator
# operator: In
# values:
# - enabled
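# a typical way to create the cluster, assuming the operator is already running
# and kubectl points at the target cluster/namespace:
# kubectl create -f complete-postgres-manifest.yaml
# kubectl get postgresql # the operator reports the cluster status here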