-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathdeploy.py
96 lines (80 loc) · 3 KB
/
deploy.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
from dataclasses import dataclass
from dotenv import load_dotenv
import os
import paramiko
def connect(host: str) -> paramiko.SSHClient:
    """Open an SSH session to *host* as the ``ubuntu`` user.

    The private key is loaded from the file named by the SSH_KEY_PATH
    environment variable (raises KeyError if unset).
    """
    key = paramiko.RSAKey.from_private_key_file(os.environ['SSH_KEY_PATH'])
    client = paramiko.SSHClient()
    # NOTE(review): AutoAddPolicy trusts unknown host keys on first
    # contact — convenient for ephemeral EC2 hosts, but confirm this
    # matches the deployment's threat model.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host, username='ubuntu', pkey=key)
    return client
def run(client, command):
    """Execute *command* on the remote host via *client*, echoing output.

    Blocks until the command finishes by reading stdout and stderr to EOF.

    Args:
        client: an open paramiko.SSHClient (or anything with a compatible
            ``exec_command`` returning (stdin, stdout, stderr) streams).
        command: the shell command string to execute remotely.
    """
    # Bug fix: the original body referenced the undefined names `cmd` and
    # `w`, which only resolved by accident through globals leaked from the
    # __main__ deploy loop. Use the actual parameter instead.
    print(f'Running: {command}')
    stdin, stdout, stderr = client.exec_command(command)
    print(stdout.read().decode('utf-8'))
    print(stderr.read().decode('utf-8'))
@dataclass(frozen=True)
class Worker:
    """One EC2 deployment target.

    frozen=True makes instances hashable, so a Worker can be used as a
    dict key (the deploy loop maps each Worker to its open SSH client).
    """
    # Name of the environment variable that holds this worker's hostname.
    envvar: str
    # used for both container name and Dockerfile name
    container_name: str
    # Extra flags appended to the `docker run` command for this worker.
    docker_run_args: str
if __name__ == "__main__":
    choice = input("About to deploy. Are you sure? [y/N] ")
    # Bug fix: the original `choice.lower()[0]` raised IndexError when the
    # user just pressed Enter; an empty answer now defaults to "no", as
    # the [y/N] prompt promises.
    if choice.strip().lower().startswith('y'):
        load_dotenv()
        pg_host = os.environ['EC2_DIVISORDB']
        # topologically sorted in dependency order,
        # first entry has no dependencies
        workers = [
            Worker(
                envvar='EC2_DIVISORDB',
                container_name='divisordb',
                # this requires first time setup to run
                # docker volume create pgdata
                docker_run_args='-p 5432:5432 -v pgdata:/var/lib/postgresql/data',
            ),
            Worker(
                envvar='EC2_GENERATOR',
                container_name='generate',
                docker_run_args=f'--env PGHOST="{pg_host}"',
            ),
            Worker(
                envvar='EC2_CLEANUP',
                container_name='cleanup',
                docker_run_args=f'--env PGHOST="{pg_host}"',
            ),
            Worker(
                envvar='EC2_PROCESSOR1',
                container_name='process',
                docker_run_args=f'--env PGHOST="{pg_host}"',
            ),
            Worker(
                envvar='EC2_PROCESSOR2',
                container_name='process',
                docker_run_args=f'--env PGHOST="{pg_host}"',
            ),
        ]
        # Open all SSH connections up front so a bad host fails fast,
        # before any container has been stopped.
        instances = {}
        for w in workers:
            host = os.environ[w.envvar]
            print(f"Connecting to {w.envvar}={host}")
            instances[w] = connect(host)
        try:
            # Tear down in reverse dependency order: stop and remove each
            # container, prune stale images, then rebuild from a fresh
            # checkout of origin/main.
            for w in reversed(workers):
                client = instances[w]
                cmd = f'docker stop {w.container_name}; docker rm {w.container_name}; docker image prune -f --filter "until=24h"'
                run(client, cmd)
                cmd = (
                    f'cd riemann-divisor-sum && '
                    f'git reset --hard origin/main && git pull && '
                    f'docker build -t {w.container_name} -f docker/{w.container_name}.Dockerfile .'
                )
                run(client, cmd)
            # Start in dependency order so each worker's prerequisites
            # (e.g. the database) are up before it launches.
            for w in workers:
                client = instances[w]
                cmd = (
                    f'docker run -d --name {w.container_name} {w.docker_run_args} {w.container_name}:latest'
                    f' && sleep 5'
                )
                run(client, cmd)
        finally:
            # Bug fix: the original leaked every SSH connection; close
            # them even if a deploy step raised.
            for client in instances.values():
                client.close()