# deploy-workflow.yml
name: Deploy

on:
  workflow_call:
    inputs:
      env:
        description: "which environment to deploy to"
        required: true
        type: string
      datahub_helm_version:
        description: "version of the datahub helm chart to use for deploy"
        required: true
        type: string
      datahub_prereqs_helm_version:
        description: "version of the datahub prerequisites helm chart to use for deploy"
        required: true
        type: string
    secrets:
      kube_namespace:
        description: "the kubernetes namespace to deploy to"
        required: true
      kube_cert:
        description: "cert used to verify identity to the cluster"
        required: true
      kube_cluster:
        description: "address of the cluster to connect to"
        required: true
      kube_token:
        description: "token used to authenticate to the cluster"
        required: true
      postgres_host:
        description: "address of the metadata database, including port"
        required: true
      postgres_client_host:
        description: "address of the metadata database, without port"
        required: true
      postgres_url:
        description: "URI including the scheme designator (prefix) and database name"
        required: true
      opensearch_proxy_host:
        description: "domain address to reach opensearch"
        required: true
      azure_client_secret:
        description: "client secret for azure authentication"
        required: true

concurrency:
  group: ${{ inputs.env }}
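
# Deploys of the same environment are serialised by the concurrency group above:
# a new run for the same `inputs.env` waits for any in-progress deploy to finish
# rather than cancelling it (cancel-in-progress is not set).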
jobs:
  deploy:
    name: Deploy Helm Chart into Cloud Platform
    environment: ${{ inputs.env }}
    runs-on: ubuntu-latest
    permissions:
      contents: write # This is required for actions/checkout
      id-token: write # This is required for requesting the JWT
    steps:
      - name: Checkout
        uses: actions/checkout@v4
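
      # The next two steps write the cluster CA certificate to disk and build a
      # kubeconfig context from the cluster address, token and namespace secrets.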
      - name: Create certificate-authority cert
        id: create-cert-authority
        shell: bash
        run: echo "${{ secrets.kube_cert }}" > ca.crt

      - name: Authenticate to the cluster
        shell: bash
        id: authenticate
        env:
          KUBE_CLUSTER: ${{ secrets.kube_cluster }}
        run: |
          kubectl config set-cluster "${KUBE_CLUSTER}" --certificate-authority=ca.crt --server="https://${KUBE_CLUSTER}"
          kubectl config set-credentials deploy-user --token="${{ secrets.kube_token }}"
          kubectl config set-context "${KUBE_CLUSTER}" --cluster="${KUBE_CLUSTER}" --user=deploy-user --namespace="${{ secrets.kube_namespace }}"
          kubectl config use-context "${KUBE_CLUSTER}"
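
      # Create the login secret for the default `datahub` user only if it does not
      # already exist, so re-running the deploy does not rotate the existing password.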
      - name: Create users-secret if it doesn't exist
        shell: bash
        id: create-users-secret-if-not-exists
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
          RELEASE_NAME: datahub
        run: |
          NS_SECRETS=$(kubectl get secrets -n ${KUBE_NAMESPACE} -o=jsonpath='{range .items..metadata}{.name}{"\n"}{end}')
          USERS_SECRET_NAME=$(echo "${NS_SECRETS}" | grep -E "${RELEASE_NAME}-users-secret" || exit_code=$?
            if (( exit_code > 1 )) ; then
              exit $exit_code
            fi)
          if [[ -z ${USERS_SECRET_NAME} ]]; then
            echo "users-secret doesn't exist. Creating..."
            USER_PASS=$(openssl rand -base64 12)
            cat >/tmp/user.props <<EOL
          // new user.props
          datahub:${USER_PASS}
          EOL
            kubectl create secret generic "${RELEASE_NAME}-users-secret" --from-file=/tmp/user.props -n ${KUBE_NAMESPACE}
          fi
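
      # The chart repo steps use continue-on-error, presumably so a transient
      # failure reaching helm.datahubproject.io (or an already-registered repo)
      # does not fail the whole deploy.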
      - name: add helm repo
        shell: bash
        id: add-helm-repo
        continue-on-error: true
        run: |
          helm repo add datahub https://helm.datahubproject.io/

      - name: update helm repos
        shell: bash
        id: update-helm-repo
        continue-on-error: true
        run: |
          helm repo update datahub
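
      # --atomic rolls the release back automatically if the upgrade has not become
      # healthy within the timeout.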
      - name: install datahub pre-requisites charts
        shell: bash
        id: upgrade-helm-prereqs
        env:
          CHART_VERSION: ${{ inputs.datahub_prereqs_helm_version }}
        run: |
          helm upgrade \
            --install prerequisites datahub/datahub-prerequisites \
            --version ${CHART_VERSION} \
            --atomic --timeout 5m0s \
            --values helm_deploy/values_prerequisites-base.yaml \
            --namespace ${{ secrets.kube_namespace }}
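
      # Derive values for later steps: APP_SHORT_HOST is the namespace (minus its
      # data-platform- prefix) under the Cloud Platform base host, and
      # OPENSEARCH_DOMAIN is the proxy hostname stripped of its domain suffix and
      # opensearch-proxy-service- prefix.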
      - name: set env vars
        shell: bash
        id: set-envs
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
          BASE_HOST: apps.live.cloud-platform.service.justice.gov.uk
          RELEASE_NAME: datahub
          OPENSEARCH_PROXY_HOST: ${{ secrets.OPENSEARCH_PROXY_HOST }}
        run: |-
          echo "BASE_HOST=${BASE_HOST}" >> $GITHUB_ENV
          echo "APP_SHORT_HOST=${KUBE_NAMESPACE/data-platform-/}.${BASE_HOST}" >> $GITHUB_ENV
          echo "EXT_DNS_ID=${RELEASE_NAME}-datahub-frontend-${{ inputs.env }}-${KUBE_NAMESPACE}-green" >> $GITHUB_ENV
          echo "OPENSEARCH_DOMAIN=$(echo ${OPENSEARCH_PROXY_HOST} | sed -e 's/[.].*$//' -e 's/opensearch-proxy-service-//')" >> $GITHUB_ENV
      - name: create azure k8s secrets
        shell: bash
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
          AZURE_CLIENT_ID: ${{ vars.CLIENT_ID }}
          AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
          TENANT_ID: ${{ vars.TENANT_ID }}
        run: |
          envsubst < helm_deploy/secrets.yaml |
            kubectl -n ${KUBE_NAMESPACE} apply -f -
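
      # Install/upgrade DataHub itself. Most configuration lives in
      # helm_deploy/values-base.yaml; environment-specific values (hostnames, OIDC
      # client, database and OpenSearch endpoints) are passed via --set below.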
      - name: install datahub helm charts
        shell: bash
        id: upgrade-helm-datahub
        env:
          APP_SHORT_HOST: ${{ env.APP_SHORT_HOST }}
          CHART_VERSION: ${{ inputs.datahub_helm_version }}
          IRSA_SA: data-catalogue-${{ inputs.env }}
          OPENSEARCH_PROXY_HOST: ${{ secrets.OPENSEARCH_PROXY_HOST }}
          POSTGRES_CLIENT_HOST: ${{ secrets.postgres_client_host }}
          POSTGRES_HOST: ${{ secrets.postgres_host }}
          POSTGRES_URL: ${{ secrets.postgres_url }}
          RELEASE_NAME: datahub
          FRONTEND_FULLNAME: datahub-frontend-${{ inputs.env }}
        # if many env-specific variables need setting, add --values files after 'base'
        # e.g. `--values helm_deploy/values-${{ inputs.env }}.yaml \`
        run: |
          helm upgrade \
            --install ${RELEASE_NAME} datahub/datahub \
            --version ${CHART_VERSION} \
            --atomic --debug --timeout 10m0s \
            --values helm_deploy/values-base.yaml \
            --namespace ${{ secrets.kube_namespace }} \
            --set datahub-frontend.fullnameOverride=${RELEASE_NAME}-${FRONTEND_FULLNAME} \
            --set datahub-frontend.oidcAuthentication.azureTenantId=${{ vars.TENANT_ID }} \
            --set datahub-frontend.oidcAuthentication.clientId=${{ vars.CLIENT_ID }} \
            --set datahub-frontend.ingress.tls[0].hosts[0]=${APP_SHORT_HOST} \
            --set datahub-frontend.ingress.hosts[0].host=${APP_SHORT_HOST} \
            --set datahub-frontend.ingress.annotations.external-dns\\.alpha\\.kubernetes\\.io/set-identifier=${EXT_DNS_ID} \
            --set acryl-datahub-actions.serviceAccount.name=${IRSA_SA} \
            --set global.elasticsearch.host=${OPENSEARCH_PROXY_HOST} \
            --set global.sql.datasource.host=${POSTGRES_HOST} \
            --set global.sql.datasource.hostForpostgresqlClient=${POSTGRES_CLIENT_HOST} \
            --set global.sql.datasource.url=${POSTGRES_URL}
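
      # Apply the NetworkPolicy that lets the Cloud Platform Prometheus scrape
      # DataHub's metrics; the manifest is rendered with envsubst before being applied.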
      - name: allow CP prometheus scraping
        shell: bash
        id: allow-prom-scrape
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
        run: |
          envsubst < helm_deploy/monitoring/datahub-networkpolicy.yaml |
            kubectl apply -f - --namespace=${KUBE_NAMESPACE}
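
      # The two dashboard steps below run only in dev. Each builds a ConfigMap with
      # kubectl in client-side dry-run mode, labels it, and applies it; the
      # grafana_dashboard label is assumed to be what the shared Grafana uses to
      # discover dashboards.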
      - name: update grafana status dashboard configmap
        if: ${{ inputs.env == 'dev' }}
        shell: bash
        id: update-grafana-status-dashboard
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
          DASHBOARD: "datahub-status-dashboard"
          DASHBOARD_FILE: "datahub-dashboard.json"
        run: |
          kubectl create configmap ${DASHBOARD} \
            --from-file="helm_deploy/monitoring/${DASHBOARD_FILE}" \
            --dry-run=client \
            --output yaml |
          kubectl label -f- \
            --dry-run=client \
            --output yaml \
            --local grafana_dashboard=${DASHBOARD} |
          kubectl apply -f- \
            --namespace=${KUBE_NAMESPACE}
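
      # Same pattern as above, after first using jq to point the dashboard's
      # "DomainName" template variable at the OPENSEARCH_DOMAIN derived in the
      # set-envs step.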
      - name: update grafana deployment dashboard configmap
        if: ${{ inputs.env == 'dev' }}
        shell: bash
        id: update-grafana-deployment-dashboard
        env:
          KUBE_NAMESPACE: ${{ secrets.kube_namespace }}
          DASHBOARD: "datahub-deployment-dashboard"
          DASHBOARD_FILE: "datahub-deployment-dashboard.json"
        run: |
          DASHBOARD_JSON="helm_deploy/monitoring/${DASHBOARD_FILE}"
          jq --arg e "${OPENSEARCH_DOMAIN}" '(.templating.list[] | (select(.label == "DomainName").current.text),select(.label == "DomainName").current.value) |= $e' $DASHBOARD_JSON > temp_json.json \
            && mv temp_json.json $DASHBOARD_JSON
          kubectl create configmap ${DASHBOARD} \
            --from-file=$DASHBOARD_JSON \
            --dry-run=client \
            --output yaml |
          kubectl label -f- \
            --dry-run=client \
            --output yaml \
            --local grafana_dashboard=${DASHBOARD} |
          kubectl apply -f- \
            --namespace=${KUBE_NAMESPACE}