From 2f656177dd335ac01cbacb5f3c6c0653f39c3388 Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Wed, 17 Jan 2024 17:52:18 +0100 Subject: [PATCH 1/8] WPB-2324 introduce more automation for dedicated Hetzner server deployments prior to kubespray upgrades --- ansible/files/hetzner_server_nftables.conf | 31 + ansible/files/hetzner_server_sshd_config | 23 + ansible/hetzner-single-deploy.yml | 141 +++++ bin/offline-vm-setup.sh | 201 ++++--- .../single_hetzner_machine_installation.md | 548 ++---------------- 5 files changed, 368 insertions(+), 576 deletions(-) create mode 100644 ansible/files/hetzner_server_nftables.conf create mode 100644 ansible/files/hetzner_server_sshd_config create mode 100644 ansible/hetzner-single-deploy.yml diff --git a/ansible/files/hetzner_server_nftables.conf b/ansible/files/hetzner_server_nftables.conf new file mode 100644 index 000000000..8520ad47c --- /dev/null +++ b/ansible/files/hetzner_server_nftables.conf @@ -0,0 +1,31 @@ +#!/usr/sbin/nft -f + +flush ruleset + +table inet filter { + chain block_definitions { + ct state established,related accept + ct state invalid drop + tcp flags != syn ct state new counter drop + counter drop +# log prefix "DROP " counter drop + } + chain INPUT { + type filter hook input priority 0; + ip protocol icmp icmp type echo-request counter accept + ip6 nexthdr ipv6-icmp icmpv6 type echo-request counter accept + ip6 nexthdr ipv6-icmp ip6 hoplimit 1 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert } counter accept + ip6 nexthdr ipv6-icmp ip6 hoplimit 255 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert } counter accept + iifname { lo, virbr0 } counter accept + tcp dport 22 counter accept comment "SSH incoming" + jump block_definitions + } + chain FORWARD { + type filter hook forward priority 0; + policy drop; + } + chain OUTPUT { + type filter hook output priority 0; + policy accept; + } +} diff --git a/ansible/files/hetzner_server_sshd_config 
b/ansible/files/hetzner_server_sshd_config new file mode 100644 index 000000000..354f182d4 --- /dev/null +++ b/ansible/files/hetzner_server_sshd_config @@ -0,0 +1,23 @@ +Port 22 + +AcceptEnv LANG LC_* +LogLevel verbose +PrintMotd no + +KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512 +Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com +MACs hmac-sha2-512-etm@openssh.com + +HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 +CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 +GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- +HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512 +PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,ecdsa-sha2-nistp521 + +PasswordAuthentication no +PubkeyAuthentication yes +ChallengeResponseAuthentication no + +Subsystem sftp /usr/lib/openssh/sftp-server +UsePAM yes +X11Forwarding no diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml new file mode 100644 index 000000000..324200a10 --- /dev/null +++ b/ansible/hetzner-single-deploy.yml @@ -0,0 +1,141 @@ +- hosts: all + become: true + vars: + artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 + ubuntu_version: 22.04.3 + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIODDzgw4BncNvSVOIraAXZYkkLn+iTo6ixkXXQ4lKZhL jan.schumacher@wire.com" + tasks: + - name: apt update + apt: update_cache=yes force_apt_get=yes + - name: apt upgrade + apt: upgrade=dist force_apt_get=yes + - name: install 
default packages + apt: + install_recommends: no + pkg: + - aptitude + - apt-transport-https + - bind9-host + - curl + - debian-goodies + - dnsutils + - git + - less + - lsof + - net-tools + - rsyslog + - screen + - sudo + - vim + - wget + - whois + - qemu + - qemu-kvm + - qemu-utils + - libvirt-clients + - libvirt-daemon-system + - virtinst + - bridge-utils + - name: generate german locales + locale_gen: + name: de_DE.UTF-8 + state: present + - name: generate us locales + locale_gen: + name: en_US.UTF-8 + state: present + - name: set system language + lineinfile: + path: /etc/default/locale + regexp: '^#?LANG=' + line: 'LANG="en_US.UTF-8"' + - name: set keyboard layout + lineinfile: + path: /etc/default/keyboard + regexp: '^#?XKBLAYOUT=' + line: 'XKBLAYOUT="us"' + - name: set keyboard variant + lineinfile: + path: /etc/default/keyboard + regexp: '^#?XKVARIANT=' + line: 'XKBVARIANT="de"' + - name: add default user accounts + user: + name: demo + groups: sudo, kvm + uid: 900 + state: present + shell: /bin/bash + password: "!" 
+ - name: Adding SSH pubkey for user demo + authorized_key: + user: demo + state: present + key: "{{ ssh_pubkey }}" + - name: passwordless sudo + lineinfile: + dest: /etc/sudoers + regexp: '^%sudo' + line: "%sudo ALL=(ALL) NOPASSWD:ALL" + - name: deploy sshd config + copy: + src: files/hetzner_server_sshd_config + dest: /etc/ssh/sshd_config + mode: 0644 + owner: root + group: root + notify: sshd | restart + - name: deploy /etc/nftables.conf + copy: + src: files/hetzner_server_nftables.conf + dest: /etc/nftables.conf + mode: 0750 + owner: root + group: root + notify: nftables | restart + - name: create wire-server-deploy directory for demo user + file: + path: /home/demo/wire-server-deploy + state: directory + owner: demo + group: demo + mode: 0775 + - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists + stat: + path: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + register: artifact_archive_file_check + - name: download wire-server-deploy archive + shell: + cmd: curl -fsSLo /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz + creates: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + when: not artifact_archive_file_check.stat.exists + - name: check if wire-server-deploy folderi contents exist + stat: + path: /home/demo/wire-server-deploy/containers-helm.tar + register: artifact_folder_content_check + - name: unpack wire-server-deploy archive + unarchive: + src: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + dest: /home/demo/wire-server-deploy + remote_src: yes + when: not artifact_folder_content_check.stat.exists + - name: check if ubuntu iso exists + stat: + path: /home/demo/wire-server-deploy/ubuntu.iso + register: iso_file_check + - name: download ubuntu {{ ubuntu_version }} iso + shell: + cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso 
https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso + creates: /home/demo/wire-server-deploy/ubuntu.iso + when: not iso_file_check.stat.exists + + handlers: + - name: sshd | restart + service: + name: sshd + state: restarted + - name: nftables | restart + service: + name: nftables + enabled: true + state: restarted diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index 7c683f2b9..8f5d046f2 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -1,26 +1,101 @@ #!/usr/bin/env bash -set -eo pipefail +set -Eeuo pipefail -nocloud_basedir=/home/demo/wire-server-deploy/nocloud +if [[ $EUID -eq 0 ]]; then + echo "Please don't run me as root" 1>&2 + exit 1 +fi + +trap cleanup SIGINT SIGTERM ERR EXIT + +usage() { + cat <&2 -e "${1-}" +} + +die() { + local msg=$1 + local code=${2-1} # default exit status 1 + msg "$msg" + exit "$code" +} + +parse_params() { + while :; do + case "${1-}" in + -h | --help) usage ;; + -v | --verbose) set -x ;; + --deploy-vm) DEPLOY_SINGLE_VM=1 ;; + -?*) die "Unknown option: $1" ;; + *) break ;; + esac + shift + done + return 0 +} + +parse_params "$@" + +SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) +DEPLOY_DIR="$(cd "$SCRIPT_DIR/../" && pwd)" +NOCLOUD_DIR=$DEPLOY_DIR/nocloud + +if [ ! 
-d "$NOCLOUD_DIR" ]; then + mkdir -p "$NOCLOUD_DIR" +fi + +if [[ -n "${DEPLOY_SINGLE_VM-}" ]]; then + VM_NAME="$2" +else + VM_NAME="assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3" +fi + +nohup python3 -m http.server 3003 -d "$NOCLOUD_DIR" & + +if [[ -f ~/.ssh/authorized_keys && -s ~/.ssh/authorized_keys ]]; then + SSHKEY=$(head -n 1 ~/.ssh/authorized_keys) + echo "Including local SSH key ""$SSHKEY"" for VM deployment" +else + read -r -p "No local SSH keys for current user ""$USER"" found; please enter a key now: " SSHKEY +fi prepare_config() { - # Run - # export OFFLINE_PASSWORD="$(mkpasswd)" - # to set the hashed password - set -u - # shellcheck disable=SC2153 - offline_username=$OFFLINE_USERNAME - # shellcheck disable=SC2153 - offline_password=$OFFLINE_PASSWORD - set +u - - name="$1" - d=$nocloud_basedir/$name - mkdir -p "$d" - touch "$d"/vendor-data - touch "$d"/meta-data - cat >"$d/user-data"<"$VM_DIR/user-data"< /etc/sudoers.d/10-demo_user -chmod 440 /etc/sudoers.d/10-demo_user -``` - -## ssh in as demo user. - -on the remote machine: - -``` -logout -``` - -on the local machine: - -``` -ssh -i ~/.ssh/id_ed25519 demo@65.21.197.76 -o serveraliveinterval=60 -``` - -## Disable root login via ssh - -Use sudo to edit `/etc/ssh/sshd_config` - -``` -sudo nano /etc/ssh/sshd_config -``` - -And set the following: - -``` -# even better: don't allow to login as root via ssh at all -PermitRootLogin no -``` - -Or use this command: - -``` -sudo sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin no/g' /etc/ssh/sshd_config -``` - -### re-start SSH +## Adjust playbook vars as needed +Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single-deploy.yml and adjust vars as needed. 
Example: ``` -sudo service ssh restart + vars: + artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 + ubuntu_version: 22.04.3 + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIODDzgw4BncNvSVOIraAXZYkkLn+iTo6ixkXXQ4lKZhL jan.schumacher@wire.com" ``` -### Install screen +## Run ansible playbook for server bootstrapping ``` -sudo apt install screen +cd wire-server-deploy/ansible +ansible-playbook hetzner-single-deploy.yml -i root@$HETZNER_IP, --diff ``` +Please note and include the trailing comma when invoking the playbook. -### Start a screen session - -``` -screen -``` +The playbook will install baseline defaults (packages, firewall, SSH config, SSH key(s), user(s)), download & extract wire-server-deploy and download the specified ubuntu ISO. +The playbook is written to be idempotent; eg. files won't be redownloaded as long as they already exist on the target host. Deploying a new version of "wire-server-deploy" is as easy as removing the folder from the target host and updating the "artifact_hash" variable in the playbook. -### download offline artifact. - -Use the HASH provided by the wire +## Create VMs +SSH into the target host as demo@$HETZNER_IP and execute wire-server-deploy/bin/offline-vm-setup.sh ``` -wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-.tgz +demo@Ubuntu-2204-jammy-amd64-base:~$ cd wire-server-deploy/ +demo@Ubuntu-2204-jammy-amd64-base:~/wire-server-deploy$ bin/offline-vm-setup.sh ``` +Without arguments, the script will deploy seven VMs behind the default libvirt network (virbr0, 192.168.122.0/24). -### extract offline artifact. + * assethost + * kubenode1 + * kubenode2 + * kubenode3 + * ansnode1 + * ansnode2 + * ansnode3 +This will take up to 15 min (longer if the server still builds its MD RAID in the background). Once all VMs are deployed, they should be shut off. 
Status can be checked with: ``` -mkdir wire-server-deploy -cd wire-server-deploy -tar -xzf ../wire-server-deploy-static-*.tgz +demo@Ubuntu-2204-jammy-amd64-base:~$ sudo virsh list --all ``` -### extract debian archive - -We'll use the docker that is in the archive. +Hint: If your local machine is running Linux, use "virt-manager" to connect to the Hetzner server and make VM administration more comfortable. +Start all VMs: -For Ubuntu 22.04: - -``` -tar -xf debs-jammy.tar ``` - -### (FIXME: add iptables to the repo) Install Docker from debian archive. - -``` -sudo bash -c ' -set -eo pipefail; - -apt -y install iptables; -dpkg -i debs-jammy/public/pool/main/d/docker-ce/docker-ce-cli_*.deb; -dpkg -i debs-jammy/public/pool/main/c/containerd.io/containerd.io_*.deb; -dpkg -i debs-jammy/public/pool/main/d/docker-ce/docker-ce_*.deb; -dpkg --configure -a; -' -``` - -### (missing) point host OS to debian archive - -### (rewrite) Install networking tools - -We're going to install dnsmasq in order to provide DNS to virtual machines, and DHCP to virtual machines. networking will be handled by ufw. - -Note that dnsmasq always fails when it installs. the failures (red stuff) is normal. - -``` -sudo bash -c ' -set -eo pipefail; - -systemctl disable systemd-resolved; -apt install dnsmasq ufw -y; -systemctl stop systemd-resolved; -' -``` - -### Tell dnsmasq to provide DNS locally. 
- -``` -sudo bash -c ' -set -eo pipefail; - -echo "listen-address=127.0.0.53 -no-resolv -server=8.8.8.8 -" > /etc/dnsmasq.d/00-lo-systemd-resolvconf; -service dnsmasq restart; -' -``` - -### Configure Firewall - -``` -sudo bash -c ' -set -eo pipefail; - -ufw allow 22/tcp; -ufw allow from 172.16.0.0/24 proto udp to any port 53; -ufw allow from 172.16.0.0/24 proto tcp to any port 3003; -ufw allow from 127.0.0.0/24 proto udp to any port 53; -ufw allow in on br0 from any proto udp to any port 67; -ufw allow Openssh; -ufw enable; -' -``` - -### Install libvirt and dependencies - -We will install libvirt to create the vms for deployment - -``` -sudo bash -c ' -set -eo pipefail; - -apt install -y qemu qemu-kvm qemu-utils libvirt-clients libvirt-daemon-system virtinst bridge-utils virt-manager; -systemctl enable libvirtd; -systemctl start libvirtd; -' -``` - -### add the demo user to the kvm and docker group - -``` -sudo usermod -a -G kvm -G docker demo -``` - -### log out, log back in, and return to Wire-Server. - -you have to exit screen, and logout - -``` -exit -logout -``` - -``` -ssh -i ~/.ssh/id_ed25519 demo@65.21.197.76 -o serveraliveinterval=60 -cd wire-server-deploy/ -screen -``` - -### install bridge-utils - -So that we can manage the virtual network. - -``` -sudo apt install bridge-utils net-tools -y -``` - -### (personal) install emacs - -``` -sudo apt install emacs-nox -y -``` - -### (temporary) manually create bridge device. - -This is the interface we are going to use to talk to the virtual machines. - -``` -sudo bash -c ' -set -eo pipefail; - -brctl addbr br0; -ifconfig br0 172.16.0.1 netmask 255.255.255.0 up; -' -``` - -### tell DnsMasq to provide DHCP to our KVM VMs. - -``` -sudo bash -c ' -set -eo pipefail; - -echo "listen-address=172.16.0.1 -dhcp-range=172.16.0.2,172.16.0.127,10m -" > /etc/dnsmasq.d/10-br0-dhcp; -service dnsmasq restart; -' -``` - -### enable ip forwarding. 
- -``` -sudo bash -c ' -set -eo pipefail; - -sed -i "s/.*net.ipv4.ip_forward.*/net.ipv4.ip_forward=1/" /etc/sysctl.conf; -sysctl -p; -' -``` - -### Enable network masquerading - -To prepare determine the interface of your outbound IP: - -``` -export OUTBOUNDINTERFACE=$(ip ro | sed -n "/default/s/.* dev \([en\(ps|o)0-9]*\) .*/\1/p") -echo OUTBOUNDINTERFACE is $OUTBOUNDINTERFACE -``` - -Please Check that `OUTBOUNDINTERFACE` is correctly set, before running enabling network masquerading: - -``` -sudo bash -c " -set -eo pipefail; - -sed -i 's/.*DEFAULT_FORWARD_POLICY=.*/DEFAULT_FORWARD_POLICY="ACCEPT"/' /etc/default/ufw; -sed -i \"1i *nat\n:POSTROUTING ACCEPT [0:0]\n-A POSTROUTING -s 172.16.0.0/24 -o $OUTBOUNDINTERFACE -j MASQUERADE\nCOMMIT\" /etc/ufw/before.rules; -service ufw restart; -" -``` - -### Add static IPs for VMs. - -``` -sudo bash -c ' -set -eo pipefail; - -echo " -dhcp-host=assethost,172.16.0.128,10h -dhcp-host=kubenode1,172.16.0.129,10h -dhcp-host=kubenode2,172.16.0.130,10h -dhcp-host=kubenode3,172.16.0.131,10h -dhcp-host=ansnode1,172.16.0.132,10h -dhcp-host=ansnode2,172.16.0.133,10h -dhcp-host=ansnode3,172.16.0.134,10h -" > /etc/dnsmasq.d/20-hosts; -service dnsmasq restart; -' -``` - -### Acquire ubuntu 22.04 server installation CD (server). - -``` -curl https://releases.ubuntu.com/jammy/ubuntu-22.04.3-live-server-amd64.iso -o ubuntu.iso -sudo setfacl -m u:libvirt-qemu:--x /home/demo -``` - -## Create 7 vms for deployment - -``` -sudo mkdir -p /var/kvm/images/ # place to store the drive images for vms -``` - -Create a separate window with screen (Ctrl+A C) and run -``` -./bin/offline-vm-setup.sh serve_nocloud -``` -This starts a web server on port 3003 to serve no_cloud configuration files for the automated VM installation process. Leave this running until you have created all the VMS. Switch back to your previous screen window (Ctrl+A N). - -Pick a username and password for the default non-root user that will be created on all VMS. 
-``` -sudo apt install whois # needed for mkpass -export OFFLINE_USERNAME=demo # feel free to change this -export OFFLINE_PASSWORD=$(mkpasswd) # this fill prompt you for a password -``` - -Create VMS in parallel and start the automatic ubuntu installation process -``` -# run these commands one after the other -./bin/offline-vm-setup.sh create_assethost assethost -./bin/offline-vm-setup.sh create_node kubenode1 -./bin/offline-vm-setup.sh create_node kubenode2 -./bin/offline-vm-setup.sh create_node kubenode3 -./bin/offline-vm-setup.sh create_node ansnode1 -./bin/offline-vm-setup.sh create_node ansnode2 -./bin/offline-vm-setup.sh create_node ansnode3 -``` - -This can take a while. While the installation is running check the state of all VMS by running`sudo virsh list --all`. You can attach a console via `sudo virsh console ansnode1` to obvserve the installation progress (Ctrl+5 to detach the console). After the installation completed the state of of each vm should be listed as `shut off`. - -Run -``` -sudo virsh list --all -``` -and check that every VM has completed installation, i.e. it is in `shut off` state. -Start all VMS - -``` -sudo bash -c " +demo@Ubuntu-2204-jammy-amd64-base:~$ sudo bash -c " set -e; virsh start assethost; virsh start kubenode1; @@ -460,104 +81,29 @@ virsh start ansnode3; " ``` -Check for every VM that dnsmasq was assigned the correct IP adress. Compare with the corresponding entry in the file `20-hosts` (see above). -For example for VM `ansnode1` run this command: - -``` -cat /etc/dnsmasq.d/20-hosts -ssh demo@ansnode1 ip a show enp1s0 -``` - -Repeat this vor all VMs. - -In case you had to destroy and recreate VMs the IP addresses might not match, because dnsmasq already has still leases for the previously created VMS. -In that case stop dnsmsq, manually remove the outdated leases from `/var/lib/misc/dnsmasq.leases`, start dnsmasq, then reboot the VM. 
- - -## disable internet access to the vms - -Replace all of ntftables.conf - -``` -sudo nano /etc/nftables.conf -``` - -With this content: - -``` -#!/usr/sbin/nft -f - -flush ruleset +## Access VMs -table inet filter { - chain input { - type filter hook input priority 0; - } - chain forward { - type filter hook forward priority 0; - policy accept; - ct state {established, related} accept comment "allow tracked connections"; - iifname "br0" oifname "enp7s0" drop comment "Drop connections that VMs try to initiate with the internet"; - } - chain output { - type filter hook output priority 0; - } -} -``` - -Then run: +VMs created with offline-vm-setup.sh are accessible via SSH with the public key which was deployed to the "demo" user in the ansible playbook. In order to access individual VMs, use SSH with ProxyJump: ``` -sudo systemctl restart nftables +~ ❯ ssh demo@192.168.122.XXX -J demo@$HETZNER_IP ``` -ssh into a vm and see if you can access the internet - - +## TODO - clarify if Docker daemon on Hetzner server can be installed via "apt install docker.io" or needs to be installed from the artifact archive, like so: ``` -ping 8.8.8.8 +demo@Ubuntu-2204-jammy-amd64-base:~/wire-server-deploy$ tar -xf debs-jammy.tar ``` -the above command shouldn't receive the packets, -in case internet is working than - restart libvirt from host machine - -``` -sudo systemctl start libvirtd ``` +demo@Ubuntu-2204-jammy-amd64-base:~/wire-server-deploy$ sudo bash -c ' +set -eo pipefail; -ssh into each vm and reboot them - -and check again, until internet is disabled on the vms. 
- -In rare case, if we want to enable internet access on the vms for some test related purpose, -comment the line -- iifname "br0" oifname "enp7s0" drop comment "Drop connections that VMs try to initiate with the internet"; -from nftables.conf and restart these services - -``` -sudo systemctl restart nftables libvirtd systemd-machined qemu-kvm.service ufw -``` - -#### install turn pointing to port 8080 - -### manage VMs - -You can use the `virsh` command to manage the virtual machines (start, stop, list them etc). - -Get help: - -``` -virsh help -``` - -Connect to `kubenode1` already running in the background. - -``` -sudo virsh console kubenode1 -``` - -List VMs: - -``` -sudo virsh list --all +apt -y install iptables; +dpkg -i debs-jammy/public/pool/main/d/docker-ce/docker-ce-cli_*.deb; +dpkg -i debs-jammy/public/pool/main/c/containerd.io/containerd.io_*.deb; +dpkg -i debs-jammy/public/pool/main/d/docker-ce/docker-ce_*.deb; +dpkg --configure -a; +' ``` ### From this point: From 0ecd0ba0af1c2275831177e938a1d7a9b4df1b5b Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Thu, 18 Jan 2024 10:17:32 +0100 Subject: [PATCH 2/8] replacing example key with dummy; adding some more script logic for offline-vm-setup.sh to detect if VM(s) already exist --- ansible/hetzner-single-deploy.yml | 2 +- bin/offline-vm-setup.sh | 40 +++++++++++++++---- .../single_hetzner_machine_installation.md | 2 +- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml index 324200a10..69751645e 100644 --- a/ansible/hetzner-single-deploy.yml +++ b/ansible/hetzner-single-deploy.yml @@ -3,7 +3,7 @@ vars: artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 ubuntu_version: 22.04.3 - ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIODDzgw4BncNvSVOIraAXZYkkLn+iTo6ixkXXQ4lKZhL jan.schumacher@wire.com" + ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" tasks: 
- name: apt update apt: update_cache=yes force_apt_get=yes diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index 8f5d046f2..e54842ab3 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -3,7 +3,7 @@ set -Eeuo pipefail if [[ $EUID -eq 0 ]]; then - echo "Please don't run me as root" 1>&2 + msg "Please don't run me as root" 1>&2 exit 1 fi @@ -17,6 +17,7 @@ Non-interactive script for deploying standard set of Ubuntu Server VMs on a sing Script will create VMs with a sudo user "demo" and PW auth disabled. For SSH access, it'll use the 1st key found in the local user's .ssh/authorized_keys. If no key can be found, it will interactively ask for a key (and accept any input, so be careful). +The script will exit gracefully if VMs already exist. Default mode with no arguments creates seven libvirt VMs using cloud-init: * assethost @@ -38,7 +39,7 @@ EOF cleanup() { trap - SIGINT SIGTERM ERR EXIT pkill -f "http.server" - rm -rf "$DEPLOY_DIR"/nocloud/* + rm -r "$DEPLOY_DIR"/nocloud/ } msg() { @@ -86,9 +87,13 @@ nohup python3 -m http.server 3003 -d "$NOCLOUD_DIR" & if [[ -f ~/.ssh/authorized_keys && -s ~/.ssh/authorized_keys ]]; then SSHKEY=$(head -n 1 ~/.ssh/authorized_keys) - echo "Including local SSH key ""$SSHKEY"" for VM deployment" + msg "" + msg "######" + msg "" + msg "Including local SSH key ""$SSHKEY"" for VM deployment" + msg "" else - read -r -p "No local SSH keys for current user ""$USER"" found; please enter a key now: " SSHKEY + read -r -p "No local SSH keys for current user ""$USER"" found; please enter a vaild key now: " SSHKEY fi prepare_config() { @@ -140,8 +145,27 @@ create_vm () { } for VM in $VM_NAME; do - set -u - msg "Creating VM $VM ..." - create_vm "$VM" - sleep 20 + if sudo virsh list --all | grep -Fq "$VM"; then + msg "######" + msg "" + msg "ATTENTION - VM ""$VM"" already exists" + msg "" + continue + else + set -u + msg "Creating VM $VM ..." 
+ create_vm "$VM" + sleep 20 + fi done + +msg "######" +msg "" +msg "Active VMs and DHCP leases:" +msg "" +sudo virsh net-dhcp-leases --network default +msg "######" +msg "" +msg "Host name 'ubuntu-server' is just a placeholder during VM installation." +msg "To look up the real host name, wait until VM deployment is finished, reboot the VM (twice) and execute 'sudo virsh net-dhcp-leases --network default' again" +msg "" diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md index 0efa3e772..4b5814005 100644 --- a/offline/single_hetzner_machine_installation.md +++ b/offline/single_hetzner_machine_installation.md @@ -28,7 +28,7 @@ Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single- vars: artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 ubuntu_version: 22.04.3 - ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIODDzgw4BncNvSVOIraAXZYkkLn+iTo6ixkXXQ4lKZhL jan.schumacher@wire.com" + ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" ``` ## Run ansible playbook for server bootstrapping From 113c9fa3bbf79559f425a82d979e8faa3ecd4f2a Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Thu, 18 Jan 2024 10:44:32 +0100 Subject: [PATCH 3/8] offline-vm-setup.sh: only delete contents of NOCLOUD_DIR upon exit, not the directory itself --- bin/offline-vm-setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index e54842ab3..796a4744e 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -39,7 +39,7 @@ EOF cleanup() { trap - SIGINT SIGTERM ERR EXIT pkill -f "http.server" - rm -r "$DEPLOY_DIR"/nocloud/ + rm -r "$DEPLOY_DIR"/nocloud/* } msg() { From 7210a70e2ab342231bd5e39ccb8bd1235561f7e2 Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Thu, 18 Jan 2024 12:15:59 +0100 Subject: [PATCH 4/8] updating readme, final chmod directive for ansible 
hetzner playbook, fix typos --- ansible/files/hetzner_server_sshd_config | 2 ++ ansible/hetzner-single-deploy.yml | 8 ++++++-- offline/single_hetzner_machine_installation.md | 10 +++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/ansible/files/hetzner_server_sshd_config b/ansible/files/hetzner_server_sshd_config index 354f182d4..59c66cac3 100644 --- a/ansible/files/hetzner_server_sshd_config +++ b/ansible/files/hetzner_server_sshd_config @@ -4,6 +4,8 @@ AcceptEnv LANG LC_* LogLevel verbose PrintMotd no +# Hardened algorithm configuration based on the output of 'ssh-audit' (https://github.com/jtesta/ssh-audit). + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512 Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com MACs hmac-sha2-512-etm@openssh.com diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml index 69751645e..9ecaa9a86 100644 --- a/ansible/hetzner-single-deploy.yml +++ b/ansible/hetzner-single-deploy.yml @@ -99,7 +99,7 @@ state: directory owner: demo group: demo - mode: 0775 + mode: 0750 - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists stat: path: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz @@ -109,7 +109,7 @@ cmd: curl -fsSLo /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz creates: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz when: not artifact_archive_file_check.stat.exists - - name: check if wire-server-deploy folderi contents exist + - name: check if wire-server-deploy folder contents exist stat: path: /home/demo/wire-server-deploy/containers-helm.tar register: artifact_folder_content_check @@ -128,6 +128,10 @@ cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso 
https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso creates: /home/demo/wire-server-deploy/ubuntu.iso when: not iso_file_check.stat.exists + - name: set permissions inside wire-server-deploy + command: find /home/demo/wire-server-deploy -type d -exec chmod -c 0750 {} \; + register: chmod_result + changed_when: "chmod_result.stdout != \"\"" handlers: - name: sshd | restart diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md index 4b5814005..16f9d9d15 100644 --- a/offline/single_hetzner_machine_installation.md +++ b/offline/single_hetzner_machine_installation.md @@ -19,7 +19,10 @@ Please note the public IP of the newly provisioned server, as it's used for the ## Pre-requisites (ansible) -Make sure to have ansible available on your local computer with a recent version (eg. 2.14.9 as of 2024-01). +Install ansible on your local computer, if not present yet. +``` +~ ❯ sudo apt install ansible ansible-core +``` ## Adjust playbook vars as needed @@ -33,9 +36,10 @@ Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single- ## Run ansible playbook for server bootstrapping +Navigate to the ansible folder in wire-server-deploy and execute the playbook using valid vars as described above. ``` -cd wire-server-deploy/ansible -ansible-playbook hetzner-single-deploy.yml -i root@$HETZNER_IP, --diff +~ ❯ cd wire-server-deploy/ansible +~ ❯ ansible-playbook hetzner-single-deploy.yml -i root@$HETZNER_IP, --diff ``` Please note and include the trailing comma when invoking the playbook. 
From 118486676f8d7def0baca3150c7a7bf1fa30e0b2 Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Fri, 19 Jan 2024 15:55:11 +0100 Subject: [PATCH 5/8] adding nftables rule for internal virbr0 traffic; updating offline-vm-setup.sh using static IPs and SSH key management, Readme updates, playbook fixes --- ansible/files/hetzner_server_nftables.conf | 3 +- ansible/hetzner-single-deploy.yml | 14 ++- bin/offline-vm-setup.sh | 111 ++++++++++-------- .../single_hetzner_machine_installation.md | 47 +++----- 4 files changed, 91 insertions(+), 84 deletions(-) diff --git a/ansible/files/hetzner_server_nftables.conf b/ansible/files/hetzner_server_nftables.conf index 8520ad47c..81ba1d79f 100644 --- a/ansible/files/hetzner_server_nftables.conf +++ b/ansible/files/hetzner_server_nftables.conf @@ -22,7 +22,8 @@ table inet filter { } chain FORWARD { type filter hook forward priority 0; - policy drop; + iifname virbr0 ip saddr 192.168.122.0/24 oifname virbr0 ip daddr 192.168.122.0/24 counter accept comment "allow all traffic between VMs" + jump block_definitions } chain OUTPUT { type filter hook output priority 0; diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml index 9ecaa9a86..c6a151950 100644 --- a/ansible/hetzner-single-deploy.yml +++ b/ansible/hetzner-single-deploy.yml @@ -1,7 +1,7 @@ - hosts: all become: true vars: - artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 + artifact_hash: cc69eb754b69b9fbe13784de8cf6e7f6ef0f7ff6 ubuntu_version: 22.04.3 ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" tasks: @@ -29,6 +29,7 @@ - vim - wget - whois + - docker.io - qemu - qemu-kvm - qemu-utils @@ -62,7 +63,7 @@ - name: add default user accounts user: name: demo - groups: sudo, kvm + groups: sudo, kvm, docker uid: 900 state: present shell: /bin/bash @@ -103,6 +104,8 @@ - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists stat: path: 
/home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + get_checksum: False + get_md5: False register: artifact_archive_file_check - name: download wire-server-deploy archive shell: @@ -128,10 +131,9 @@ cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso creates: /home/demo/wire-server-deploy/ubuntu.iso when: not iso_file_check.stat.exists - - name: set permissions inside wire-server-deploy - command: find /home/demo/wire-server-deploy -type d -exec chmod -c 0750 {} \; - register: chmod_result - changed_when: "chmod_result.stdout != \"\"" + - name: set permissions inside wire-server-deploy via shell command (fails when using ansible directive) + shell: + cmd: chmod -R 0775 /home/demo/wire-server-deploy; chowm -R demo:demo /home/demo handlers: - name: sshd | restart diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index 796a4744e..4aef49c20 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -13,20 +13,26 @@ usage() { cat </dev/null } msg() { @@ -78,26 +84,37 @@ if [ ! 
-d "$NOCLOUD_DIR" ]; then fi if [[ -n "${DEPLOY_SINGLE_VM-}" ]]; then - VM_NAME="$2" + VM_NAME=("$2") + VM_IP=("192.168.122.$(shuf -i100-240 -n1)") else - VM_NAME="assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3" + VM_NAME=(assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3) + VM_IP=(192.168.122.10 192.168.122.21 192.168.122.22 192.168.122.23 192.168.122.31 192.168.122.32 192.168.122.33) fi -nohup python3 -m http.server 3003 -d "$NOCLOUD_DIR" & +if [[ -f "$HOME"/.ssh/authorized_keys && -s "$HOME"/.ssh/authorized_keys ]]; then + SSHKEY_HUMAN=$(head -n 1 ~/.ssh/authorized_keys) +else + read -r -p "No local SSH keys for current user ""$USER"" found; please enter a vaild key now: " SSHKEY_HUMAN +fi -if [[ -f ~/.ssh/authorized_keys && -s ~/.ssh/authorized_keys ]]; then - SSHKEY=$(head -n 1 ~/.ssh/authorized_keys) - msg "" - msg "######" - msg "" - msg "Including local SSH key ""$SSHKEY"" for VM deployment" - msg "" +if [[ -f "$HOME"/.ssh/id_ed25519 ]]; then + SSHKEY_DEMO=$(cat "$HOME"/.ssh/id_ed25519.pub) else - read -r -p "No local SSH keys for current user ""$USER"" found; please enter a vaild key now: " SSHKEY + ssh-keygen -t ed25519 -q -N '' -f "$HOME"/.ssh/id_ed25519 + SSHKEY_DEMO=$(cat "$HOME"/.ssh/id_ed25519.pub) fi +msg "" +msg "Including the following SSH Keys for VM deployment:" +msg "" +msg "Existing key from ~/.ssh/authorized_keys: ""$SSHKEY_HUMAN""" +msg "Local keypair key from ~/.ssh/id_ed25519: ""$SSHKEY_DEMO""" +msg "" + +nohup python3 -m http.server 3003 -d "$NOCLOUD_DIR" /dev/null 2>&1 & + prepare_config() { - VM_DIR=$NOCLOUD_DIR/$VM + VM_DIR=$NOCLOUD_DIR/${VM_NAME[i]} mkdir -p "$VM_DIR" touch "$VM_DIR"/{vendor-data,meta-data} cat >"$VM_DIR/user-data"< Date: Tue, 23 Jan 2024 10:17:59 +0100 Subject: [PATCH 6/8] updating readmes, fixing hetzner ansible playbook --- ansible/hetzner-single-deploy.yml | 83 ++++++------- bin/offline-vm-setup.sh | 5 +- offline/docs_ubuntu_22.04.md | 113 +++++------------- 
.../single_hetzner_machine_installation.md | 4 + 4 files changed, 80 insertions(+), 125 deletions(-) diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml index c6a151950..9c5400ba3 100644 --- a/ansible/hetzner-single-deploy.yml +++ b/ansible/hetzner-single-deploy.yml @@ -94,46 +94,49 @@ owner: root group: root notify: nftables | restart - - name: create wire-server-deploy directory for demo user - file: - path: /home/demo/wire-server-deploy - state: directory - owner: demo - group: demo - mode: 0750 - - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists - stat: - path: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz - get_checksum: False - get_md5: False - register: artifact_archive_file_check - - name: download wire-server-deploy archive - shell: - cmd: curl -fsSLo /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz - creates: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz - when: not artifact_archive_file_check.stat.exists - - name: check if wire-server-deploy folder contents exist - stat: - path: /home/demo/wire-server-deploy/containers-helm.tar - register: artifact_folder_content_check - - name: unpack wire-server-deploy archive - unarchive: - src: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz - dest: /home/demo/wire-server-deploy - remote_src: yes - when: not artifact_folder_content_check.stat.exists - - name: check if ubuntu iso exists - stat: - path: /home/demo/wire-server-deploy/ubuntu.iso - register: iso_file_check - - name: download ubuntu {{ ubuntu_version }} iso - shell: - cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso - creates: /home/demo/wire-server-deploy/ubuntu.iso - when: not iso_file_check.stat.exists - - name: set permissions inside 
wire-server-deploy via shell command (fails when using ansible directive) - shell: - cmd: chmod -R 0775 /home/demo/wire-server-deploy; chowm -R demo:demo /home/demo + - name: deploy wire artifact, ubuntu iso + block: + - name: create wire-server-deploy directory for demo user + file: + path: /home/demo/wire-server-deploy + state: directory + owner: demo + group: demo + mode: 0775 + - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists + stat: + path: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + get_checksum: False + get_md5: False + register: artifact_archive_file_check + - name: download wire-server-deploy archive + shell: + cmd: curl -fsSLo /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz + creates: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + when: not artifact_archive_file_check.stat.exists + - name: check if wire-server-deploy folder contents exist + stat: + path: /home/demo/wire-server-deploy/containers-helm.tar + register: artifact_folder_content_check + - name: unpack wire-server-deploy archive + unarchive: + src: /home/demo/wire-server-deploy-static-{{ artifact_hash }}.tgz + dest: /home/demo/wire-server-deploy + remote_src: yes + when: not artifact_folder_content_check.stat.exists + - name: check if ubuntu iso exists + stat: + path: /home/demo/wire-server-deploy/ubuntu.iso + register: iso_file_check + - name: download ubuntu {{ ubuntu_version }} iso + shell: + cmd: curl -fsSLo /home/demo/wire-server-deploy/ubuntu.iso https://releases.ubuntu.com/jammy/ubuntu-{{ ubuntu_version }}-live-server-amd64.iso + creates: /home/demo/wire-server-deploy/ubuntu.iso + when: not iso_file_check.stat.exists + - name: set permissions inside wire-server-deploy via shell command (fails when using ansible directive) + shell: + cmd: sudo chmod -R 0775 /home/demo/wire-server-deploy; sudo chown -R 
demo:demo /home/demo + become_user: demo handlers: - name: sshd | restart diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index 4aef49c20..33576165e 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -128,6 +128,9 @@ autoinstall: enp1s0: dhcp4: no addresses: [${VM_IP[i]}/24] + routes: + - to: default + via: 192.168.122.1 storage: layout: sizing-policy: all @@ -173,7 +176,7 @@ create_vm () { for (( i=0; i<${#VM_NAME[@]}; i++ )); do if sudo virsh list --all | grep -Fq "${VM_NAME[i]}"; then msg "" - msg "ATTENTION - VM ""${VM_NAME[i]}"" already exists, exiting ..." + msg "ATTENTION - VM ""${VM_NAME[i]}"" already exists" msg "" continue else diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md index fa85e4db2..eed2b71a3 100644 --- a/offline/docs_ubuntu_22.04.md +++ b/offline/docs_ubuntu_22.04.md @@ -1,80 +1,24 @@ -# How to install wire +# How to install wire (offline cluster) We have a pipeline in `wire-server-deploy` producing container images, static binaries, ansible playbooks, debian package sources and everything required to install Wire. -## Installing docker +## Preparations -Note: If you are using a Hetzner machine, docker should already be installed (you can check with `docker version`) and you can skip this section. +This section is a continuation of the demo cluster setup described in single_hetzner_machine_installation.md. At this point, the following prerequisites should be met: -On your machine (we call this the "admin host"), you need to have `docker` -installed (or any other compatible container runtime really, even though -instructions may need to be modified). See [how to install -docker](https://docker.com) for instructions. 
+ * dedicated server system with sufficient resources + * deployment of ansible/hetzner-single-deploy.yml playbook + * deployment of seven libvirt VMs using bin/offline-vm-setup.sh shell script -On ubuntu 22.04, connected to the internet: +If the above prerequisites can not be met for whatever reason, here's a short summary in order to continue: -``` -sudo bash -c ' -set -eo pipefail; - -apt install docker.io; -systemctl enable docker; -systemctl start docker; -' -``` - -Ensure the user you are using for the install has permission to run docker, or add 'sudo' to the docker commands below. - -### Ensuring you can run docker without sudo: - -Run the following command to add your user to the docker group: - -``` -sudo usermod -aG docker $USER -``` - -Note: Replace $USER with your actual username as needed. - -Log out and log back in to apply the changes. Alternatively, you can run the following command to activate the changes in your current shell session: - -``` -newgrp docker -``` - -Verify that you can run Docker without sudo by running the following command: - -``` -docker version -``` - -If you see the curent docker version and no error, it means that Docker is now configured to run without sudo. - - -## Downloading and extracting the artifact - -Note: If you have followed the Ubuntu installation instructions (`single_hetzner_machine_installation.md`) before following this page, you already have a wire-server-deploy folder with an artifact extracted into it, and you can simply use that. - -Create a fresh workspace to download the artifacts: - -``` -$ cd ... # you pick a good location! -``` -Obtain the latest airgap artifact for wire-server-deploy. Please contact us to get it. 
- -Extract the above listed artifacts into your workspace: - -``` -$ wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-.tgz -$ tar xvzf wire-server-deploy-static-.tgz -``` -Where `` above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the above build job. -Extract this tarball. - -Make sure that the admin host can `ssh` into all the machines that you want to provision. Our docker container will use the `.ssh` folder and the `ssh-agent` of the user running the scripts. - -There's also a docker image containing the tooling inside this repo. + * dedicated system with root access running Ubuntu Server 22.04, Docker daemon, KVM/libvirt, reachable by public IP + * internal libvirt subnet (eg. 192.168.122.0/24), which is not able to forward traffic to public internet + * passwordless sudo user (eg. "demo") that is part of "docker" system group + * download and extraction of wire artifact archive, download of Ubuntu 22.04 Server ISO + * deployment of seven libvirt VMs as described in single_hetzner_machine_installation.md ## Making tooling available in your environment. @@ -172,9 +116,9 @@ Edit the 'kubenode' entries, and the 'assethost' entry like normal. Instead of creating separate cassandra, elasticsearch, and minio entries, create three 'ansnode' entries, similar to the following: ``` -ansnode1 ansible_host=172.16.0.132 -ansnode2 ansible_host=172.16.0.133 -ansnode3 ansible_host=172.16.0.134 +ansnode1 ansible_host=192.168.122.31 +ansnode2 ansible_host=192.168.122.32 +ansnode3 ansible_host=192.168.122.33 ``` ##### Updating Group Membership @@ -225,9 +169,9 @@ You'll need at least 3 `kubenode`s. 3 of them should be added to the additional nodes should only be added to the `[kube-node]` group. 
### Setting up databases and kubernetes to talk over the correct (private) interface -If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add "ip=" declarations for the private interface of each node. for instance, if the first kubenode was expected to talk to the world on 172.16.0.129, but speak to other wire services (kubernetes, databases, etc) on 192.168.0.2, you should edit its entry like the following: +If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add "ip=" declarations for the private interface of each node. for instance, if the first kubenode was expected to talk to the world on 192.168.122.21/24, but speak to other wire services (kubernetes, databases, etc) on 192.168.0.2/24, you should edit its entry like the following: ``` -kubenode1 ansible_host=172.16.0.129 ip=192.168.0.2 +kubenode1 ansible_host=192.168.122.21 ip=192.168.0.2 ``` Do this for all of the instances. @@ -283,22 +227,23 @@ In order to automatically generate deeplinks, Edit the minio variables in `[mini ### Example hosts.ini -Here is an example `hosts.ini` file that was used in a succesfull example deployment, for reference. It might not be exactly what is needed for your deployment, but it should work for the KVM 7-machine deploy: +Here is an example `hosts.ini` file that was used in a succesfull example deployment, for reference. 
It might not be exactly what is needed for your deployment, but it should work for the libvirt deployment described in single_hetzner_machine_installation.md: ``` [all] -kubenode1 ansible_host=172.16.0.129 -kubenode2 ansible_host=172.16.0.130 -kubenode3 ansible_host=172.16.0.131 -ansnode1 ansible_host=172.16.0.132 -ansnode2 ansible_host=172.16.0.133 -ansnode3 ansible_host=172.16.0.134 -assethost ansible_host=172.16.0.128 +assethost ansible_host=192.168.122.10 +kubenode1 ansible_host=192.168.122.21 +kubenode2 ansible_host=192.168.122.22 +kubenode3 ansible_host=192.168.122.23 +ansnode1 ansible_host=192.168.122.31 +ansnode2 ansible_host=192.168.122.32 +ansnode3 ansible_host=192.168.122.33 [all:vars] ansible_user = demo -ansible_password = fai -ansible_become_password = fai +# uncomment below if using PWs instead of SSH keys +#ansible_password = fai +#ansible_become_password = fai [cassandra:vars] cassandra_network_interface = enp1s0 @@ -314,7 +259,7 @@ deeplink_title = "wire demo environment, example.com" [restund:vars] restund_uid = root -restund_allowed_private_network_cidrs='["172.16.0.0/24"]' +restund_allowed_private_network_cidrs='["192.168.122.0/24"]' [rmq-cluster:vars] rabbitmq_network_interface = enp1s0 diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md index 54ca2344a..5d9da76dd 100644 --- a/offline/single_hetzner_machine_installation.md +++ b/offline/single_hetzner_machine_installation.md @@ -27,6 +27,8 @@ Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single- ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" ``` +The variable 'artifact_hash' above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the build job. 
+ ## Run ansible playbook for server bootstrapping Navigate to the ansible folder in wire-server-deploy and execute the playbook using valid vars as described above. @@ -39,6 +41,8 @@ Please note and include the trailing comma when invoking the playbook. Playbook The playbook will install baseline defaults (packages, firewall, SSH config, SSH key(s), user(s)), download & extract wire-server-deploy and download the specified ubuntu ISO. The playbook is written to be idempotent; eg. files won't be redownloaded as long as they already exist on the target host. Deploying a new version of "wire-server-deploy" is as easy as removing the folder from the target host and updating the "artifact_hash" variable in the playbook. +At this point it's recommended to reboot the server once. + ## Create VMs SSH into the target host as demo@$HETZNER_IP and execute wire-server-deploy/bin/offline-vm-setup.sh From 00f191de8c04246fadb62ffcf409eda5454becd0 Mon Sep 17 00:00:00 2001 From: Jan Schumacher <155645800+jschumacher-wire@users.noreply.github.com> Date: Tue, 23 Jan 2024 11:10:25 +0100 Subject: [PATCH 7/8] Update offline/single_hetzner_machine_installation.md Co-authored-by: Julia Longtin --- offline/single_hetzner_machine_installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md index 5d9da76dd..0c8a4a476 100644 --- a/offline/single_hetzner_machine_installation.md +++ b/offline/single_hetzner_machine_installation.md @@ -6,7 +6,7 @@ Bootstrapping a single dedicated Hetzner server for virtual machine deployment a ## Use the hetzner robot console to create a new server. -Select Ubuntu 22.04.2 on an ax101 dedicated server. If possible, please already provide a public key in the Hetzner console which can be used for ansible deployment. +Select Ubuntu 22.04.2 on an ax101 dedicated server. 
Make sure you provide a public key in the Hetzner console which can be used for ansible deployment. If not using Hetzner, for reference, the specs of the ax101 server are: From 7965ab05da835c789af9ed66cf6cfb3198ccf50e Mon Sep 17 00:00:00 2001 From: Jan Schumacher Date: Wed, 24 Jan 2024 16:29:42 +0100 Subject: [PATCH 8/8] reworking documentation for single deploy; adding nftables examples for ingress forwarding; adding more logic to offline-vm-deploy script; updating SSH pubkey to make use of existing infrastructure key --- ansible/hetzner-single-deploy.yml | 2 +- bin/offline-vm-setup.sh | 49 +++++-- offline/docs_ubuntu_22.04.md | 125 +++++++++++++++--- .../single_hetzner_machine_installation.md | 5 +- 4 files changed, 146 insertions(+), 35 deletions(-) diff --git a/ansible/hetzner-single-deploy.yml b/ansible/hetzner-single-deploy.yml index 9c5400ba3..227ec4dad 100644 --- a/ansible/hetzner-single-deploy.yml +++ b/ansible/hetzner-single-deploy.yml @@ -3,7 +3,7 @@ vars: artifact_hash: cc69eb754b69b9fbe13784de8cf6e7f6ef0f7ff6 ubuntu_version: 22.04.3 - ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDPTGTo1lTqd3Ym/75MRyQvj8xZINO/GI6FzfIadSe5c backend+hetzner-dedicated-operator@wire.com" tasks: - name: apt update apt: update_cache=yes force_apt_get=yes diff --git a/bin/offline-vm-setup.sh b/bin/offline-vm-setup.sh index 33576165e..e5057b3d0 100755 --- a/bin/offline-vm-setup.sh +++ b/bin/offline-vm-setup.sh @@ -26,13 +26,16 @@ For SSH access, it'll use two keys: The script will exit gracefully if VMs already exist. 
Default mode with no arguments creates seven libvirt VMs using cloud-init: - * assethost - IP: 192.168.122.10 - * kubenode1 - IP: 192.168.122.21 - * kubenode2 - IP: 192.168.122.22 - * kubenode3 - IP: 192.168.122.23 - * ansnode1 - IP: 192.168.122.31 - * ansnode2 - IP: 192.168.122.32 - * ansnode3 - IP: 192.168.122.33 + + | hostname | IP | RAM | VCPUs | disk space (thin provisioned) | + ------------------------------------------------------------------------------- + | assethost | 192.168.122.10 | 4096 MiB | 2 | 100 GB | + | kubenode1 | 192.168.122.21 | 8192 MiB | 6 | 100 GB | + | kubenode2 | 192.168.122.22 | 8192 MiB | 6 | 100 GB | + | kubenode3 | 192.168.122.23 | 8192 MiB | 6 | 100 GB | + | ansnode1 | 192.168.122.31 | 8192 MiB | 4 | 350 GB | + | ansnode2 | 192.168.122.32 | 8192 MiB | 4 | 350 GB | + | ansnode3 | 192.168.122.33 | 8192 MiB | 4 | 350 GB | Available options: -h, --help Print this help and exit @@ -86,9 +89,15 @@ fi if [[ -n "${DEPLOY_SINGLE_VM-}" ]]; then VM_NAME=("$2") VM_IP=("192.168.122.$(shuf -i100-240 -n1)") + VM_VCPU=(4) + VM_RAM=(8192) + VM_DISK=(100) else VM_NAME=(assethost kubenode1 kubenode2 kubenode3 ansnode1 ansnode2 ansnode3) VM_IP=(192.168.122.10 192.168.122.21 192.168.122.22 192.168.122.23 192.168.122.31 192.168.122.32 192.168.122.33) + VM_VCPU=(2 6 6 6 4 4 4) + VM_RAM=(4096 8192 8192 8192 8192 8192 8192) + VM_DISK=(100 100 100 100 350 350 350) fi if [[ -f "$HOME"/.ssh/authorized_keys && -s "$HOME"/.ssh/authorized_keys ]]; then @@ -162,9 +171,9 @@ create_vm () { sudo virt-install \ --name "${VM_NAME[i]}" \ - --ram 8192 \ - --disk path=/var/lib/libvirt/images/"${VM_NAME[i]}".qcow2,size=100 \ - --vcpus 4 \ + --ram "${VM_RAM[i]}" \ + --disk path=/var/lib/libvirt/images/"${VM_NAME[i]}".qcow2,size="${VM_DISK[i]}" \ + --vcpus "${VM_VCPU[i]}" \ --network bridge=virbr0 \ --graphics none \ --osinfo detect=on,require=off \ @@ -181,11 +190,23 @@ for (( i=0; i<${#VM_NAME[@]}; i++ )); do continue else set -u - msg "Creating VM ""${VM_NAME[i]}"" with IP 
""${VM_IP[i]}"" ..." - create_vm "${VM_NAME[i]}" - msg "Writing IP and hostname to /etc/hosts ..." - echo """${VM_IP[i]}"" ""${VM_NAME[i]}""" | sudo tee -a /etc/hosts msg "" + msg "Creating VM ""${VM_NAME[i]}"" ..." + msg "IP: ""${VM_IP[i]}""" + msg "VCPUs: ""${VM_VCPU[i]}""" + msg "RAM: ""${VM_RAM[i]}"" MiB" + msg "DISK: ""${VM_DISK[i]}"" GB" + create_vm "${VM_NAME[i]}" + if grep -Fq "${VM_NAME[i]}" /etc/hosts; then + msg "" + msg "Updating existing record in /etc/hosts for ""${VM_NAME[i]}"" with IP ""${VM_IP[i]}""" + sudo sed -i -e "/${VM_NAME[i]}/c\\${VM_IP[i]} ${VM_NAME[i]}" /etc/hosts + else + msg "" + msg "Writing IP and hostname to /etc/hosts ..." + echo """${VM_IP[i]}"" ""${VM_NAME[i]}""" | sudo tee -a /etc/hosts + msg "" + fi sleep 20 fi done diff --git a/offline/docs_ubuntu_22.04.md b/offline/docs_ubuntu_22.04.md index eed2b71a3..db0716a00 100644 --- a/offline/docs_ubuntu_22.04.md +++ b/offline/docs_ubuntu_22.04.md @@ -4,21 +4,77 @@ We have a pipeline in `wire-server-deploy` producing container images, static binaries, ansible playbooks, debian package sources and everything required to install Wire. -## Preparations +## Installing docker -This section is a continuation of the demo cluster setup described in single_hetzner_machine_installation.md. At this point, the following prerequisites should be met: +Note: If you are using a Hetzner machine, docker should already be installed (you can check with `docker version`) and you can skip this section. - * dedicated server system with sufficient resources - * deployment of ansible/hetzner-single-deploy.yml playbook - * deployment of seven libvirt VMs using bin/offline-vm-setup.sh shell script +On your machine (we call this the "admin host"), you need to have `docker` +installed (or any other compatible container runtime really, even though +instructions may need to be modified). See [how to install +docker](https://docker.com) for instructions. 
-If the above prerequisites can not be met for whatever reason, here's a short summary in order to continue: +On ubuntu 22.04, connected to the internet: - * dedicated system with root access running Ubuntu Server 22.04, Docker daemon, KVM/libvirt, reachable by public IP - * internal libvirt subnet (eg. 192.168.122.0/24), which is not able to forward traffic to public internet - * passwordless sudo user (eg. "demo") that is part of "docker" system group - * download and extraction of wire artifact archive, download of Ubuntu 22.04 Server ISO - * deployment of seven libvirt VMs as described in single_hetzner_machine_installation.md +``` +sudo bash -c ' +set -eo pipefail; + +apt install docker.io; +systemctl enable docker; +systemctl start docker; +' +``` + +Ensure the user you are using for the install has permission to run docker, or add 'sudo' to the docker commands below. + +### Ensuring you can run docker without sudo: + +Run the following command to add your user to the docker group: + +``` +sudo usermod -aG docker $USER +``` + +Note: Replace $USER with your actual username as needed. + +Log out and log back in to apply the changes. Alternatively, you can run the following command to activate the changes in your current shell session: + +``` +newgrp docker +``` + +Verify that you can run Docker without sudo by running the following command: + +``` +docker version +``` + +If you see the current docker version and no error, it means that Docker is now configured to run without sudo. + + +## Downloading and extracting the artifact + +Note: If you have followed the Ubuntu installation instructions (`single_hetzner_machine_installation.md`) before following this page, you already have a wire-server-deploy folder with an artifact extracted into it, and you can simply use that. + +Create a fresh workspace to download the artifacts: + +``` +$ cd ... # you pick a good location! +``` +Obtain the latest airgap artifact for wire-server-deploy. Please contact us to get it. 
+ +Extract the above listed artifacts into your workspace: + +``` +$ wget https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-.tgz +$ tar xvzf wire-server-deploy-static-.tgz +``` +Where `` above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the above build job. +Extract this tarball. + +Make sure that the admin host can `ssh` into all the machines that you want to provision. Our docker container will use the `.ssh` folder and the `ssh-agent` of the user running the scripts. + +There's also a docker image containing the tooling inside this repo. ## Making tooling available in your environment. @@ -169,7 +225,7 @@ You'll need at least 3 `kubenode`s. 3 of them should be added to the additional nodes should only be added to the `[kube-node]` group. ### Setting up databases and kubernetes to talk over the correct (private) interface -If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add "ip=" declarations for the private interface of each node. for instance, if the first kubenode was expected to talk to the world on 192.168.122.21/24, but speak to other wire services (kubernetes, databases, etc) on 192.168.0.2/24, you should edit its entry like the following: +If you are deploying wire on servers that are expected to use one interface to talk to the public, and a separate interface to talk amongst themselves, you will need to add "ip=" declarations for the private interface of each node. 
for instance, if the first kubenode was expected to talk to the world on 192.168.122.21, but speak to other wire services (kubernetes, databases, etc) on 192.168.0.2, you should edit its entry like the following: ``` kubenode1 ansible_host=192.168.122.21 ip=192.168.0.2 ``` @@ -227,7 +283,7 @@ In order to automatically generate deeplinks, Edit the minio variables in `[mini ### Example hosts.ini -Here is an example `hosts.ini` file that was used in a succesfull example deployment, for reference. It might not be exactly what is needed for your deployment, but it should work for the libvirt deployment described in single_hetzner_machine_installation.md: +Here is an example `hosts.ini` file that was used in a successful example deployment, for reference. It might not be exactly what is needed for your deployment, but it should work for the KVM 7-machine deploy: ``` [all] @@ -241,9 +297,8 @@ ansnode3 ansible_host=192.168.122.33 [all:vars] ansible_user = demo -# uncomment below if using PWs instead of SSH keys -#ansible_password = fai -#ansible_become_password = fai +ansible_password = fai +ansible_become_password = fai [cassandra:vars] cassandra_network_interface = enp1s0 @@ -605,16 +660,26 @@ ufw allow in on $OUTBOUNDINTERFACE proto tcp to any port 80; " ``` +If using nftables, incoming traffic for ports 80 and 443 can be forwarded with these commands: +``` +sudo bash -c " +set -xeo pipefail; + +nft add rule nat PREROUTING iif $OUTBOUNDINTERFACE tcp dport 80 dnat to $KUBENODEIP:31772 +nft add rule nat PREROUTING iif $OUTBOUNDINTERFACE tcp dport 443 dnat to $KUBENODEIP:31773 +" +``` + ###### Mirroring the public IP -`cert-manager` has a requirement on being able to reach the kubernetes on its external IP. 
This might be problematic, because in security conscious environments, the external IP might not be owned by any of the kubernetes hosts. On an IP Masquerading router, you can redirect outgoing traffic from your cluster, i.e. when the cluster asks to connect to your external IP, it will be routed to the kubernetes node inside the cluster. Make sure `PUBLICIPADDRESS` is exported (see above). ``` -export INTERNALINTERFACE=br0 +export INTERNALINTERFACE=virbr0 sudo bash -c " set -xeo pipefail; @@ -625,6 +690,17 @@ iptables -t nat -A PREROUTING -i $INTERNALINTERFACE -d $PUBLICIPADDRESS -p tcp - or add the corresponding rules to a config file (for UFW, /etc/ufw/before.rules) so they persist after rebooting. +Using nftables: + +``` +sudo bash -c " +set -xeo pipefail; + +nft add rule nat PREROUTING iif $INTERNALINTERFACE ip daddr $PUBLICIPADDRESS tcp dport 80 dnat to $KUBENODEIP:31772 +nft add rule nat PREROUTING iif $INTERNALINTERFACE ip daddr $PUBLICIPADDRESS tcp dport 443 dnat to $KUBENODEIP:31773 +" +``` + ### Incoming Calling Traffic Make sure `OUTBOUNDINTERFACE` and `PUBLICIPADDRESS` are exported (see above). @@ -648,6 +724,19 @@ iptables -t nat -A PREROUTING -d $PUBLICIPADDRESS -i $OUTBOUNDINTERFACE -p udp - or add the corresponding rules to a config file (for UFW, /etc/ufw/before.rules) so they persist after rebooting. +Using nftables: + +``` +sudo bash -c " +set -xeo pipefail; + +nft add rule nat PREROUTING iif $OUTBOUNDINTERFACE ip daddr $PUBLICIPADDRESS tcp dport 80 dnat to $RESTUND01IP:80 +nft add rule nat PREROUTING iif $OUTBOUNDINTERFACE ip daddr $PUBLICIPADDRESS udp dport 80 dnat to $RESTUND01IP:80 +nft add rule nat PREROUTING iif $OUTBOUNDINTERFACE ip daddr $PUBLICIPADDRESS udp dport 32768-60999 dnat to $RESTUND01IP:32768-60999 +" +``` + + ### Changing the TURN port FIXME: ansibleize this! 
diff --git a/offline/single_hetzner_machine_installation.md b/offline/single_hetzner_machine_installation.md index 0c8a4a476..7c7fc9593 100644 --- a/offline/single_hetzner_machine_installation.md +++ b/offline/single_hetzner_machine_installation.md @@ -15,7 +15,8 @@ If not using Hetzner, for reference, the specs of the ax101 server are: - 2 x 3.84 TB NVMe SSD Datacenter Edition (software RAID 1) - 1 GBit/s port -Please note the public IP of the newly provisioned server, as it's used for the ansible playbook run. +The main public IPv4 address of the Hetzner server to connect to with SSH / ansible can be found in the "Server" tab in the Hetzner Robot console, next to the Server Name. +As soon as the initial Hetzner server deployment is finished, we'll use Ansible to further provision the system. ## Adjust ansible playbook vars as needed @@ -24,7 +25,7 @@ Take a look at the "vars:" section in wire-server-deploy/ansible/hetzner-single- vars: artifact_hash: a6e0929c9a5f4af09655c9433bb56a4858ec7574 ubuntu_version: 22.04.3 - ssh_pubkey: "ssh-ed25519 AAAAC3Nz_CHANGEME_TE5AAAA_CHANGEME_cRpDu8vNelUH+changeme/OWB50Rk5GP jane.doe@example.com" + ssh_pubkey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDPTGTo1lTqd3Ym/75MRyQvj8xZINO/GI6FzfIadSe5c backend+hetzner-dedicated-operator@wire.com" ``` The variable 'artifact_hash' above is the hash of your deployment artifact, given to you by Wire, or acquired by looking at the build job.