diff --git a/.gitignore b/.gitignore index 2ec23f6..9bcdf6f 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,7 @@ typescript *~ *.secret +variables.yml +vault.yml +*.swp +hosts.lab diff --git a/1-create-jump.sh b/1-create-jump.sh index 110d9fe..2ea125c 100755 --- a/1-create-jump.sh +++ b/1-create-jump.sh @@ -2,36 +2,83 @@ source ./env.sh -if [ ! -d $VMS ] -then - echo "Creating $VMS" - mkdir -p $VMS +# verify a bunch of variables have contents, as a sanity check +if [ ! -v VMS ]; then + echo "VMS is not defined. Please set this in env.sh and try again." + exit 1 +fi +if [ ! -v CLUSTERKEY ]; then + echo "CLUSTERKEY is not defined. Please set this in env.sh and try again." + exit 2 +fi +if [ ! -v ORIGINALKEY ]; then + echo "ORIGINALKEY is not defined. Please set this in env.sh and try again." + exit 3 +fi +if [ ! -v VMROOTDISK ]; then + echo "VMROOTDISK is not defined. Please set this in env.sh and try again." + exit 4 +fi +if [ ! -v INITIALPASSWD ]; then + echo "INITIALPASSWD is not defined. Please set this in env.sh and try again." + exit 5 fi -for i in `cat hosts.jump|grep -v \\\\[`; -do +HOSTSFILE=hosts.jump +if [ ! -r "$HOSTSFILE" ]; then + echo "${HOSTSFILE} is missing." + exit 10 +fi - echo "########################################################################" - echo "[$i start]" +# create the VM directory if it doesn't exist +if [ ! -d "$VMS" ]; then + echo "Creating ${VMS}" + mkdir -p "$VMS" +fi - baseimage="$VMS/$i-base.qcow2" - image="$VMS/$i.qcow2" - dockerdisk="$VMS/$i-docker.qcow2" - glusterfsdisk="$VMS/$i-glusterfs.qcow2" +# sanity checks around the ssh key stuff +if [ ! -f "$CLUSTERKEY" ]; then # if the cluster sshkey doesn't exist, try to copy it from the original key + if [ ! -r "$ORIGINALKEY" ]; then # check to see if the original key exists and is readable + echo "${CLUSTERKEY} does not exist, and ${ORIGINALKEY} can't be found/read." + exit 6 + fi - echo "[Creating a $VMROOTDISK disk for root, $image]" - qemu-img create -f qcow2 $baseimage $VMROOTDISK - virt-resize --expand /dev/sda1 $RHEL_IMAGE $baseimage + # if CLUSTERKEY doesn't exist, but the ORIGINALKEY does, copy it. + cp "$ORIGINALKEY" "$CLUSTERKEY" + COPYRESULT=$? + if [ "$COPYRESULT" -ne 0 ]; then + echo "Error: Copying ${ORIGINALKEY} to create ${CLUSTERKEY} failed with: ${COPYRESULT}" + exit 7 + fi +fi +if [ ! -r "$CLUSTERKEY" ]; then # bomb out if the cluster sshkey isn't readable + echo "${CLUSTERKEY} is missing, or is not readable. Exiting now."
+ exit 8 +fi - qemu-img create -f qcow2 -b $baseimage $image +# ensure that the permissions are correct for the new cluster key +chmod 644 "$CLUSTERKEY" - echo "[Customizing $i system]" - virt-customize -a $image --run-command 'yum remove cloud-init* -y' - virt-customize -a $image --root-password password:redhat - virt-customize -a $image --ssh-inject root:file:$WORKSPACE/vm_id_rsa.pub - virt-customize -a $image --hostname "$i" - echo "[$i done]" +# a safer, cleaner loop over the hosts file to read in lines, rather than words +grep -E '^[^\[ ]' < $HOSTSFILE | while IFS= read -r i +do + echo "########################################################################" + echo "[${i} start]" -done + BASEIMAGE="${VMS}/${i}-base.qcow2" + IMAGE="${VMS}/${i}.qcow2" + DOCKERDISK="${VMS}/${i}-docker.qcow2" + GLUSTERFSDISK="${VMS}/${i}-glusterfs.qcow2" + + echo "[Creating a ${VMROOTDISK} disk for root, ${IMAGE}]" + qemu-img create -f qcow2 "$BASEIMAGE" "$VMROOTDISK" + virt-resize --expand /dev/sda1 "$RHEL_IMAGE" "$BASEIMAGE" + qemu-img create -f qcow2 -b "$BASEIMAGE" "$IMAGE" -exit + echo "[Customizing ${i} system]" + virt-customize -a "$IMAGE" --run-command 'yum remove cloud-init* -y' + virt-customize -a "$IMAGE" --root-password password:"$INITIALPASSWD" + virt-customize -a "$IMAGE" --ssh-inject root:file:"$CLUSTERKEY" + virt-customize -a "$IMAGE" --hostname "$i" + echo "[${i} done]" +done diff --git a/1-create-node.sh b/1-create-node.sh index 0c11cd5..d1a403d 100755 --- a/1-create-node.sh +++ b/1-create-node.sh @@ -2,42 +2,89 @@ source ./env.sh -if [ ! -d $VMS ] -then - echo "Creating $VMS" - mkdir -p $VMS +# verify a bunch of variables have contents, as a sanity check +if [ ! -v VMS ]; then + echo "${VMS} is not defined. Please set this in env.sh and try again." + exit 1 +fi +if [ ! -v CLUSTERKEY ]; then + echo "${CLUSTERKEY} is not defined. Please set this in env.sh and try again." + exit 2 +fi +if [ ! -v ORIGINALKEY ]; then + echo "${ORIGINALKEY} is not defined. Please set this in env.sh and try again." + exit 3 +fi +if [ ! -v VMROOTDISK ]; then + echo "${VMROOTDISK} is not defined. Please set this in env.sh and try again." + exit 4 +fi +if [ ! -v INITIALPASSWD ]; then + echo "${INITIALPASSWD} is not defined. Please set this in env.sh and try again." + exit 5 fi -for i in `cat hosts.addnode|grep -v \\\\[`; -do +HOSTSFILE=hosts.addnode +if [ ! -r "$HOSTSFILE" ]; then + echo "${HOSTSFILE} is missing." + exit 10 +fi - echo "########################################################################" - echo "[$i start]" +# create the VM directory if it doesn't exist +if [ ! -d "$VMS" ]; then + echo "Creating ${VMS}" + mkdir -p "$VMS" +fi - baseimage="$VMS/$i-base.qcow2" - image="$VMS/$i.qcow2" - dockerdisk="$VMS/$i-docker.qcow2" - glusterfsdisk="$VMS/$i-glusterfs.qcow2" +# sanity checks around the ssh key stuff +if [ ! -f "$CLUSTERKEY" ]; then # if the cluster sshkey doesn't exist, try to copy it from the original key + if [ ! -r "$ORIGINALKEY" ]; then # check to see if the original key exists and is readable + echo "${CLUSTERKEY} does not exist, and ${ORIGINALKEY} can't be found/read." + exit 6 + fi - echo "[Creating a $VMROOTDISK disk for root, $image]" - qemu-img create -f qcow2 $baseimage $VMROOTDISK - virt-resize --expand /dev/sda1 $RHEL_IMAGE $baseimage + # if CLUSTERKEY doesn't exist, but the ORIGINALKEY does, copy it. + cp "$ORIGINALKEY" "$CLUSTERKEY" + COPYRESULT=$? 
+ if [ "$COPYRESULT" -ne 0 ]; then + echo "Error: Copying ${ORIGINALKEY} to create ${CLUSTERKEY} failed with: ${COPYRESULT}" + exit 7 + fi +fi +if [ ! -r "$CLUSTERKEY" ]; then # bomb out if the cluster sshkey isn't readable + echo "${CLUSTERKEY} is missing, or is not readbale. Exiting now." + exit 8 +fi - qemu-img create -f qcow2 -b $baseimage $image +# ensure that the permissions are correct for the new cluster key +chmod 644 "$CLUSTERKEY" - echo "[Creating a $VMDOCKERDISK disk for docker, $dockerdisk]" - qemu-img create -f raw $dockerdisk $VMDOCKERDISK +# a safer, cleaner loop over the hosts file to read in lines, rather than words +grep -E '^[^\[ ]' < $HOSTSFILE | while IFS= read -r i +do + echo "########################################################################" + echo "[${i} start]" - echo "[Creating a $VMGLUSTERFSDISK disk for glusterfs, $glusterfsdisk]" - qemu-img create -f raw $glusterfsdisk $VMGLUSTERFSDISK + BASEIMAGE="${VMS}/${i}-base.qcow2" + IMAGE="${VMS}/${i}.qcow2" + DOCKERDISK="${VMS}/${i}-docker.qcow2" + GLUSTERFSDISK="${VMS}/${i}-glusterfs.qcow2" - echo "[Customizing $i system]" - virt-customize -a $image --run-command 'yum remove cloud-init* -y' - virt-customize -a $image --root-password password:redhat - virt-customize -a $image --ssh-inject root:file:$WORKSPACE/vm_id_rsa.pub - virt-customize -a $image --hostname "$i" - echo "[$i done]" + echo "[Creating a ${VMROOTDISK} disk for root, ${IMAGE}]" + qemu-img create -f qcow2 "$BASEIMAGE" "$VMROOTDISK" + virt-resize --expand /dev/sda1 "$RHEL_IMAGE" "$BASEIMAGE" + qemu-img create -f qcow2 -b "$BASEIMAGE" "$IMAGE" -done + echo "[Creating a ${VMDOCKERDISK} disk for docker, ${DOCKERDISK}]" + qemu-img create -f raw "$DOCKERDISK" "$VMDOCKERDISK" + + echo "[Creating a ${VMGLUSTERFSDISK} disk for glusterfs, ${GLUSTERFSDISK}]" + qemu-img create -f raw "$GLUSTERFSDISK" "$VMGLUSTERFSDISK" -exit + echo "[Customizing ${i} system]" + virt-customize -a "$IMAGE" --run-command 'yum remove cloud-init* -y' + virt-customize -a "$IMAGE" --root-password password:"$INITIALPASSWD" + virt-customize -a "$IMAGE" --ssh-inject root:file:"$CLUSTERKEY" + virt-customize -a "$IMAGE" --hostname "$i" + echo "[${i} done]" +done diff --git a/1-create.sh b/1-create.sh index b7ceeac..e19edf1 100755 --- a/1-create.sh +++ b/1-create.sh @@ -2,42 +2,89 @@ source ./env.sh -if [ ! -d $VMS ] -then - echo "Creating $VMS" - mkdir -p $VMS +# verify a bunch of variables have contents, as a sanity check +if [ ! -v VMS ]; then + echo "${VMS} is not defined. Please set this in env.sh and try again." + exit 1 +fi +if [ ! -v CLUSTERKEY ]; then + echo "${CLUSTERKEY} is not defined. Please set this in env.sh and try again." + exit 2 +fi +if [ ! -v ORIGINALKEY ]; then + echo "${ORIGINALKEY} is not defined. Please set this in env.sh and try again." + exit 3 +fi +if [ ! -v VMROOTDISK ]; then + echo "${VMROOTDISK} is not defined. Please set this in env.sh and try again." + exit 4 +fi +if [ ! -v INITIALPASSWD ]; then + echo "${INITIALPASSWD} is not defined. Please set this in env.sh and try again." + exit 5 fi -for i in `cat hosts|grep -v \\\\[`; -do +HOSTSFILE=hosts +if [ ! -r "$HOSTSFILE" ]; then + echo "${HOSTSFILE} is missing." + exit 10 +fi - echo "########################################################################" - echo "[$i start]" +# create the VM directory if it doesn't exist +if [ ! 
-d "$VMS" ]; then + echo "Creating ${VMS}" + mkdir -p "$VMS" +fi - baseimage="$VMS/$i-base.qcow2" - image="$VMS/$i.qcow2" - dockerdisk="$VMS/$i-docker.qcow2" - glusterfsdisk="$VMS/$i-glusterfs.qcow2" +# sanity checks around the ssh key stuff +if [ ! -f "$CLUSTERKEY" ]; then # if the cluster sshkey doesn't exist, try to copy it from the original key + if [ ! -r "$ORIGINALKEY" ]; then # check to see if the original key exists and is readable + echo "${CLUSTERKEY} does not exist, and ${ORIGINALKEY} can't be found/read." + exit 6 + fi - echo "[Creating a $VMROOTDISK disk for root, $image]" - qemu-img create -f qcow2 $baseimage $VMROOTDISK - virt-resize --expand /dev/sda1 $RHEL_IMAGE $baseimage + # if CLUSTERKEY doesn't exist, but the ORIGINALKEY does, copy it. + cp "$ORIGINALKEY" "$CLUSTERKEY" + COPYRESULT=$? + if [ "$COPYRESULT" -ne 0 ]; then + echo "Error: Copying ${ORIGINALKEY} to create ${CLUSTERKEY} failed with: ${COPYRESULT}" + exit 7 + fi +fi +if [ ! -r "$CLUSTERKEY" ]; then # bomb out if the cluster sshkey isn't readable + echo "${CLUSTERKEY} is missing, or is not readbale. Exiting now." + exit 8 +fi - qemu-img create -f qcow2 -b $baseimage $image +# ensure that the permissions are correct for the new cluster key +chmod 644 "$CLUSTERKEY" - echo "[Creating a $VMDOCKERDISK disk for docker, $dockerdisk]" - qemu-img create -f raw $dockerdisk $VMDOCKERDISK +# a safer, cleaner loop over the hosts file to read in lines, rather than words +grep -E '^[^\[ ]' < $HOSTSFILE | while IFS= read -r i +do + echo "########################################################################" + echo "[${i} start]" - echo "[Creating a $VMGLUSTERFSDISK disk for glusterfs, $glusterfsdisk]" - qemu-img create -f raw $glusterfsdisk $VMGLUSTERFSDISK + BASEIMAGE="${VMS}/${i}-base.qcow2" + IMAGE="${VMS}/${i}.qcow2" + DOCKERDISK="${VMS}/${i}-docker.qcow2" + GLUSTERFSDISK="${VMS}/${i}-glusterfs.qcow2" - echo "[Customizing $i system]" - virt-customize -a $image --run-command 'yum remove cloud-init* -y' - virt-customize -a $image --root-password password:redhat - virt-customize -a $image --ssh-inject root:file:$WORKSPACE/vm_id_rsa.pub - virt-customize -a $image --hostname "$i" - echo "[$i done]" + echo "[Creating a ${VMROOTDISK} disk for root, ${IMAGE}]" + qemu-img create -f qcow2 "$BASEIMAGE" "$VMROOTDISK" + virt-resize --expand /dev/sda1 "$RHEL_IMAGE" "$BASEIMAGE" + qemu-img create -f qcow2 -b "$BASEIMAGE" "$IMAGE" -done + echo "[Creating a ${VMDOCKERDISK} disk for docker, ${DOCKERDISK}]" + qemu-img create -f raw "$DOCKERDISK" "$VMDOCKERDISK" + + echo "[Creating a ${VMGLUSTERFSDISK} disk for glusterfs, ${GLUSTERFSDISK}]" + qemu-img create -f raw "$GLUSTERFSDISK" "$VMGLUSTERFSDISK" -exit + echo "[Customizing ${i} system]" + virt-customize -a "$IMAGE" --run-command 'yum remove cloud-init* -y' + virt-customize -a "$IMAGE" --root-password password:"$INITIALPASSWD" + virt-customize -a "$IMAGE" --ssh-inject root:file:"$CLUSTERKEY" + virt-customize -a "$IMAGE" --hostname "$i" + echo "[${i} done]" +done diff --git a/2-build-jump.sh b/2-build-jump.sh index 1ef4c7c..1a5efbd 100755 --- a/2-build-jump.sh +++ b/2-build-jump.sh @@ -20,10 +20,10 @@ do echo "[dry-run install $i w/ mac ${MACADDRESS[$i]}" virt-install --ram $VMRAM_JUMP --vcpus 4 --os-variant rhel7 --disk path=$image,device=disk,bus=virtio,format=qcow2 \ - --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ + --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network 
bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ --print-xml > $VMS/$i.xml # You may also need to change the CPU depending on the hypervisor's CPU -# --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ +# --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ echo "[define $i]" virsh define --file $VMS/$i.xml diff --git a/2-build-node.sh b/2-build-node.sh index b47e625..3f981f7 100755 --- a/2-build-node.sh +++ b/2-build-node.sh @@ -20,10 +20,10 @@ do echo "[dry-run install $i w/ mac ${MACADDRESS[$i]}" virt-install --ram $VMRAM_OCP --vcpus 4 --os-variant rhel7 --disk path=$image,device=disk,bus=virtio,format=qcow2 \ - --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ + --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ --print-xml > $VMS/$i.xml # You may also need to change the CPU depending on the hypervisor's CPU -# --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ +# --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ echo "[define $i]" virsh define --file $VMS/$i.xml diff --git a/2-build.sh b/2-build.sh index 01f95f6..2518065 100755 --- a/2-build.sh +++ b/2-build.sh @@ -20,10 +20,10 @@ do echo "[dry-run install $i w/ mac ${MACADDRESS[$i]}" virt-install --ram $VMRAM_OCP --vcpus 4 --os-variant rhel7 --disk path=$image,device=disk,bus=virtio,format=qcow2 \ - --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ + --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ --print-xml > $VMS/$i.xml # You may also need to change the CPU depending on the hypervisor's CPU -# --noautoconsole --vnc --name $i --dry-run --cpu Skylake-Client,+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ +# --noautoconsole --vnc --name $i --dry-run --cpu ${CPUMODEL},+vmx --network bridge=${BRIDGE},mac=${MACADDRESS[$i]} \ echo "[define $i]" virsh define --file $VMS/$i.xml diff --git a/5-cluster.sh b/5-cluster.sh index 9e2639f..238f820 100755 --- a/5-cluster.sh +++ b/5-cluster.sh @@ -2,49 +2,21 @@ source ./env.sh -echo "copied sample configuration hosts.ocp, hosts and 3-keys.sh to the jumpstation" -scp hosts.ocp root@jump.$DOMAIN:~/ -scp hosts root@jump.$DOMAIN:~/ -scp 3-keys.sh root@jump.$DOMAIN:~/ -echo "Do this:" -echo " $ ssh root@xjump.$DOMAIN" -echo " jump# ssh-keygen" -echo " jump# bash ./3-keys.sh" -echo " jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml" -echo " jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/deploy-cluster.yml" -echo " jump# ssh root@master0.gwiki.org \"htpasswd -b /etc/origin/master/htpasswd marc SekretPassword\"" -echo " jump# oadm policy add-role-to-user system:registry marc (optional)" -exit -#### -# Below are random notes -#### - -https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/installing-a-cluster#what-s-next-2 - -Once the cluster is created, - -ssh root@master0.$DOMAIN and do: - - htpasswd -b /etc/origin/master/htpasswd marc SekretPassword - oadm policy add-role-to-user system:registry marc - - 
-https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/setting-up-the-registry#install-config-registry-overview +echo "copying sample configuration hosts.ocp, hosts and 3-keys.sh to the jumpstation" +# TODO: this should be updated to use a real jump machine hostname from the list of hosts, and not assume 'jump' is valid +#scp hosts.ocp root@jump.$DOMAIN:~/ +#scp hosts root@jump.$DOMAIN:~/ +#scp 3-keys.sh root@jump.$DOMAIN:~/ +scp -r * root@jump.$DOMAIN:~/ +echo "Now go do these steps:" +echo " hypervisor$ ssh root@jump.$DOMAIN" +echo " jump# ssh-keygen # accept the defaults" +echo " jump# bash ./3-keys.sh" +echo " jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml" +echo " jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml" +# TODO: master node name could be pulled from the list of masters in the envs/hosts/etc +echo " jump# ssh root@ \"htpasswd -b /etc/origin/master/htpasswd \" " +# uncomment this out when we fix why oadm isn't on the jump host. Should this be run on the master instead of the jump? +#echo " jump# oadm policy add-role-to-user system:registry (optional) -for non production use (may not have to do) - -$ sudo chown 1001:root -$ oadm registry --service-account=registry \ - --config=/etc/origin/master/admin.kubeconfig \ - --images='registry.access.redhat.com/openshift3/ose-${component}:${version}' \ - --mount-host= - -https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/setting-up-a-router#install-config-router-overview - -oadm policy add-cluster-role-to-user \ - cluster-reader \ - system:serviceaccount:default:router - -oadm router --replicas= --service-account=router - -https://master0.$DOMAIN:8443/ +exit diff --git a/README.md b/README.md index 2aaa870..551860f 100644 --- a/README.md +++ b/README.md @@ -1,96 +1,70 @@ # openshift-home-lab -Sample script to build a KVM environment for OpenShift 3.11 in my home lab. +Sample script to build a KVM environment for OpenShift 3.11 in a lab environment. This is not intended for production use, but rather for testing. -I needed to setup a local OpenShift environment for experimentation on a -local desktop with 32GB memory. These config files work for me to do a -simple install of OpenShift. - -If you want to do the same, here are some scripts and configuration -files to help get you going. - -This is still a bit rough and requires some editing of files. Please -send patches/PR. - -Thanks to mmagnani, ruchika, and MarcNo for kickstarting the initial work. +Thanks to mmagnani, ruchika, @hupiper, and @MarcNo for kickstarting the initial work. ## What do you get -* Three RHEL7.5 VMs running in KVM (1 master, 1 node, 1 jump). You can add - more as desired as part of the initial installation, or after you get - things up and running. The more VMs you turn on, the more memory you will - need. I have more than one server serving some of my nodes (more on that - below). +* Three RHEL7.5 VMs running in KVM (1 master, 1 node, 1 jump). + * You can add more as desired as part of the initial installation, or after you get things up and running. * Registered and appropriate subscriptions attached * required RPMs installed, including atomic-openshift-installer * docker installed and storage configured -* ready to install the OpenShift cluster from the jump VM. 
+* an OpenShift cluster (installed from the jump VM) ## Requirements -* Access to DNS server. I'm using two personal domains hosted on - godaddy.com. -* Access to DHCP server. I'm using my home tp-link router and tie - specific IP addresses to known mac addresses. ie: VMs always get the - same IP address from DHCP. -* RHEL 7 KVM hypervisor host -* `rhel-server-7.5-x86_64-kvm.qcow2` (from https://access.redhat.com/downloads/) -* 1 NIC +* Access to a DNS server. (Either public or internal) +* Access to a DHCP server. (Anything that can provide static leases) +* RHEL 7 KVM hypervisor host (or compatible, like Fedora/CentOS) +* `rhel-server-7.5-x86_64-kvm.qcow2` from: + * From [Redhat download section](https://access.redhat.com/downloads/) > Red Hat Enterprise Linux > the KVM guest image +* 1 free NIC on the host -** If you are looking at this the first time, and wondering what you need to know to -get up and running, this is the place to start reading. ** +**If you are looking at this the first time, and wondering what you need to know to get up and running, this is the place to start reading.** -These instructions assume you are installing a jump server VM in addition to -the master0 and node0 nodes. The install scripts for the jump server are -separate to allow you to skip the jump server if you choose to. +### 1. Pull a local copy of the scripts -### 1. Clone or fork this git repo -``` -$ git clone https://github.com/hupiper/openshift-home-lab.git -$ cd openshift-home-lab -``` +Clone this git repo to your hypervisor, or fork it and clone your own fork of this git repo. ### 2. Create working directories +Create two working directories: one for ISOs (specifically the rhel kvm image), and one to store your local VM disks and configs. ``` mkdir ~/ocp/VMs mkdir ~/ISOs ``` +These can be anywhere, but the examples here correlate to the default configuration in the `env.sh` configuration. ### 3. Edit hosts file -There are several host files in this repo. The first one you want to look at is -hosts. After the [ocp] line, make sure the next two lines are the FQDN's for -your master and your node. +There are several host files in this repo. The first one you want to look at is hosts. After the [ocp] line, make sure the next two lines are the FQDN's for your master(s) and your node(s): ``` [ocp] -master0.domain.com -node0.domain.com +master0.example.com +node0.example.com ``` -Edit the hosts.jump file: +Edit the hosts.jump file to include the FQDN of your new jump host: ``` [jump] -jump.domain.com +jump.example.com ``` +These instructions assume you are installing a jump VM in addition to a master and node VMs. The install scripts for the jump are separate to allow you to skip the jump server if you choose to. + ### 4. Edit env.sh -When you look at the env.sh file, you'll notice that the MAC addresses are -already set up for your VMs. You need to take those MACs and add them to your -router (in my case) to pin IP addresses to those MACs. You'll also use those IP -addresses in DNS. +When you look at the `env.sh` file, you'll notice that the MAC addresses are already set up for your VMs. 
You need to take those MACs and add them to your DHCP server (see step 6 below) to pin IP addresses to those MACs; you'll also use those IP addresses in DNS. - - DOMAIN - the domain name to use for the hosts (ie: domain.com) + - DOMAIN - the domain name to use for the hosts (ie: example.com) - MACADDRESS - MAC addresses for your VMs (be unique) - - OCPDOMAIN - the domain name for the cluster (ie: ocp.ocpdomain.com, - \*.apps.ocpdomain.com\) + - OCPDOMAIN - the domain name for the cluster (ie: ocp.ocpexample.com, \*.apps.ocpexample.com\) - WORKSPACE, VMS - where VMs, etc are stored - ISOS - where your ISOs can be found - RHEL_IMAGE - your rhel-server-7.5-x86_64-kvm.qcow2 image - BRIDGE - which bridge to use. See Network Notes below ### 5. Add DNS A records for your domains -godaddy hosts my DNS records, so I don't need to hack `/etc/resolv.conf`. But -you will need to create/update your DNS A records to point to the local -addresses so it looks like this. eg: +You can use public DNS like godaddy to host your DNS records, or something internal like dnsmasq, so you don't need to hack `/etc/resolv.conf`. Either way, you will need to create/update your DNS A records to point to the local addresses so a lookup looks like this. eg: $ nslookup jump.$DOMAIN Server: 8.8.8.8 @@ -100,27 +74,26 @@ addresses so it looks like this. eg: Name: jump.$DOMAIN Address: 192.168.88.99 -Your A record for domain.com would be: +Your A record for example.com would be: ``` Host * Points to * TTL jump 192.168.88.99 1 hour ``` -Also setup wildcard DNS entry for ocp.$OCPDOMAIN, \*.apps.$OCPDOMAIN to -point to the master0.$DOMAIN IP address. +Also set up a wildcard DNS entry for ocp.$OCPDOMAIN, \*.apps.$OCPDOMAIN to point to the master0.$DOMAIN IP address. ### 6. Update your DHCP server - -Tie those specific IP addresses defined in DNS to known mac addresses. ie: -VMs always get the same IP address from DHCP. +Tie those specific IP addresses defined in DNS to known MAC addresses using static DHCP leases. We need the VMs to always get the same IP address. ### 7. Add required packages to your hosts -I'm assuming you are using RHEL 7.5 server as the host OS, but this also works -with Fedora 28/29 (use DNF instead of yum). +Install the required packages on your host: ``` $ sudo yum install -y ansible $ sudo yum install -y qemu-kvm libvirt libvirt-python libguestfs-tools virt-install +``` +Start and enable the libvirtd virtualization daemon: +``` $ sudo systemctl enable libvirtd $ sudo systemctl start libvirtd ``` @@ -132,36 +105,22 @@ $ ssh-keygen -f /home/user/.ssh/id_rsa -t rsa -N '' $ cp ~/.ssh/id_rsa.pub ~/ocp/vm_id_rsa.pub ``` ### 9. Check CPU model in 2-build.sh script -1-create.sh creates the VMs for your nodes. 2-build.sh configures them to run -in your KVM environment. The virt-install command in this file has a --cpu -variable that is set to Skylake-client, which is the model of Intel CPU I am using -in my server. To see what model CPU you are using, use the virsh capabilities -command. +1-create.sh creates the VMs for your nodes. 2-build.sh configures them to run in your KVM environment. The virt-install command in this file has a --cpu option that is read from the CPUMODEL setting in the `env.sh` configuration. To see what model CPU you are using, use the following command. ``` -$ virsh capabilities - - - - 42d0404d-dd37-4be6-8703-336ddad75b67 - - x86_64 - IvyBridge-IBRS - Intel +$ virsh capabilities | grep -i model | head -n1 ``` -You should see the model in the first several lines of XML that is generated from -that command.
If it is not Skylake-client, you'll need to edit the command in -2-build.sh for the model you have, which for the example above is IvyBridge-IBRS. +It should output a single line of XML, showing the model, like: +```<model>Skylake-Client</model>``` +Use the value in between the `model` tags as the CPUMODEL setting in the `env.sh` configuration file. ### 10. Set up Linux Bridging -We are using Linux bridging to connect the physical NIC to the VMs in the hypervisor. -The bridge is called LondonBridge (it was that way when I forked - you can use -br0 or something similar if you like, but you'll have to make sure all of the -scripts are using that name too). I'm assuming you are using the Network Manager. +We are using Linux bridging to connect the physical NIC to the VMs in the hypervisor. The bridge is called LondonBridge (it was that way when I forked - you can use br0 or something similar if you like, but you'll have to make sure all of the scripts are using that name too). This process expects/uses NetworkManager; if you have it disabled, create the equivalent configuration yourself. -Add this line to /etc/sysctl.conf: -` net.ipv4.ip_forward=1` +We need IPv4 forwarding turned on persistently for the host, so add this line to /etc/sysctl.conf: +`net.ipv4.ip_forward=1` +Replace `enp0s25` in the following with the host interface you want to use for external connectivity for the cluster. (Something like enp0s25, eno2, eth1, etc) ``` $ sudo nmcli con add type bridge con-name LondonBridge ifname LondonBridge $ sudo nmcli con add type ethernet con-name UK-slave ifname enp0s25 master LondonBridge @@ -190,40 +149,50 @@ $ sudo virsh net-autostart default ### 11. Edit variables.yml -You need to set the openshift_subscription_pool for your own Red Hat account. -Use this command will find your pool id: +You need to set the `openshift_subscription_pool` for your own Red Hat account. From a RHEL system with an active subscription, use this command to find your pool id: `subscription-manager list --all --available --matches "*openshift*"` +(You can also find your subscription info in the Red Hat portal.) + Make variable.yml look something like this: `openshift_subscription_pool: 8a85f98c63842fef01647d9012060465` ### 12. Create ansible-vault vault.yml -Create a vault to store your own Red Hat subscription username/password -in variables. (ie: what you use on the Red Hat portal) +Create a vault to store your own Red Hat subscription username/password in variables. (ie: what you use on the Red Hat portal) **Delete the vault.yml file cloned in this repo first** - `ansible-vault create vault.yml` - this command will open - a vi session with a file called vault.yml. - - add these two lines to this file: - - `vault_rhn_username: my-rhn-support-username` + `ansible-vault create vault.yml` - this command will ask you for a new vault password, and open an editor with a file called vault.yml. Add these two lines to this file (quote the password if it has special characters in it): - `vault_rhn_password: secretpassword-for-rhn` +``` +vault_rhn_username: rhn-support-username +vault_rhn_password: secretpassword-for-rhn +``` - Take a look at the resulting file and it should not have the - variables in cleartext.
+ Check the resulting file; it should not contain the variables in cleartext, but should show something like this: +``` +$ cat vault.yml +$ANSIBLE_VAULT;1.1;AES256 +35613131303036653238396335393661623664383461633066633431633038666665663365623434 +3738376238623639333262633131393738663232376135390a363532346165613633336533326631 +65353032326234326434313834613231653064313231396562336563363430396162373036303261 +6266623032333137360a653465656263633863336232383632383435613865393335333237626138 +36656530653361336463663563306466336462656164313365373338323564663264316462343630 +33383730316539363632366235623266613364633135316261623339383963623235343334373933 +30383730613836306530383266343139363335363431376366333132643232316462373937363235 +34386365613330636266313337616236356262613432313231383864343261316638353864353435 +6164 +``` ### 14. (not superstitious, just careful) Edit hosts.ocp -Change `oreg_auth_user` to your Red Hat subscription name, and -`oreg_auth_password` to the Red Hat subscription password. +Change these: +`oreg_auth_user` to your Red Hat subscription name +`oreg_auth_password` to the Red Hat subscription password -Change `openshift_master_default_subdomain` to the OCPDOMAIN you specified in -the env.sh file. +Change `openshift_master_default_subdomain` to the OCPDOMAIN you specified in the `env.sh` file. ## Run on your hypervisor @@ -231,30 +200,21 @@ the env.sh file. * `1-create-jump.sh` -- ditto for jump server * `2-build.sh` -- Install VMs and attach disks * `2-build-jump.sh` -- Install jump VM and attach disk -* `start-all.sh` -- boot them up -* `$ virsh start jump.domain.com` +* `start-all.sh` -- boot them up / `$ virsh start jump.example.com` * `3-keys.sh` -- push ssh keys around -* `4-prep.sh` -- update the VMs with required packages, etc +* `4-prep.sh` -- update the VMs with required packages, etc -- this step takes a while * `4-prep-jump.sh` -- update the VMs with required packages, etc * `5-cluster.sh` -- copy files to jump VMs and remind the next steps ### Install OpenShift * `hypervisor$ ssh root@jump.pokitoach.com # password is redhat` -* `jump# ssh-keygen` -* `jump# bash ./3-keys.sh` -* `jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/deploy-cluster.yml` - -May need to scp ssh keys and/or ssh into the other nodes from the jump node to -make sure the known_hosts file is updated. - -If you run into the WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! message, -use +* ` jump# ssh-keygen` (accept all defaults) +* ` jump# bash ./3-keys.sh` +* ` jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml` +* ` jump# ansible-playbook -i hosts.ocp /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml` +* ` jump# ssh root@<master> "htpasswd -b /etc/origin/master/htpasswd <user> <password>" ` -`ssh-keygen -R node.domain.com` - -for each host. Then you need to ssh into them again to add them to the known_hosts -file. * Based on what we specified in the hosts.ocp file we are using the HTPasswdPasswordIdentityProvider type of RBAC in OpenShift. So we need to populate the htpasswd file with users for our system. @@ -266,6 +226,46 @@ the htpasswd file with users for our system. ### Start using OpenShift The easiest way to get started is to point a browser to -https://ocp.$OCPDOMAIN:8443/. +https://ocp.$OCPDOMAIN:8443/ Good luck! + +# Troubleshooting notes + +## SSH Keys +If you run into the `WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!` message, use `ssh-keygen -R node.example.com` for each host.
Then you need to ssh into them again to add them to the `known_hosts` file. This shouldn't come up often though, as the `kill-all.sh` script should be removing them automatically. + +May need to scp ssh keys and/or ssh into the other nodes from the jump node to make sure the `known_hosts` file is updated. + + +# Random notes from the 5-cluster script: + +https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/installing-a-cluster#what-s-next-2 + +Once the cluster is created, + +ssh root@master0.$DOMAIN and do: + + htpasswd -b /etc/origin/master/htpasswd marc SekretPassword + oadm policy add-role-to-user system:registry marc + + +https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/setting-up-the-registry#install-config-registry-overview + +for non production use (may not have to do) + +$ sudo chown 1001:root +$ oadm registry --service-account=registry \ + --config=/etc/origin/master/admin.kubeconfig \ + --images='registry.access.redhat.com/openshift3/ose-${component}:${version}' \ + --mount-host= + +https://access.redhat.com/documentation/en-us/openshift_container_platform/3.5/html/installation_and_configuration/setting-up-a-router#install-config-router-overview + +oadm policy add-cluster-role-to-user \ + cluster-reader \ + system:serviceaccount:default:router + +oadm router --replicas= --service-account=router + +https://master0.$DOMAIN:8443/ diff --git a/config.yml b/config.yml new file mode 100644 index 0000000..525e165 --- /dev/null +++ b/config.yml @@ -0,0 +1,43 @@ +### All tasks are commented out, only uncomment necessary tasks to run. ### +# +### Syntax to run: ansible-playbook -i config.yml + +# Deploy OpenShift +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml + +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/installing_clusters/install-running-installation-playbooks#advanced-retrying-installation +# A la carte playbooks to reconfigure infrastructure and install additional components +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/pre-install.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-node/bootstrap.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-etcd/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-nfs/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-loadbalancer/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-master/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-master/additional_config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-node/join.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-glusterfs/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-hosted/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-monitoring/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-web-console/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-console/config.yml +# - import_playbook: 
/usr/share/ansible/openshift-ansible/playbooks/openshift-metrics/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-logging/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-monitor-availability/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-service-catalog/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-management/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-descheduler/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-node-problem-detector/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-autoheal/config.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/olm/config.yml + +# Upgrade OpenShift +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_11/upgrade.yml + +# Redeploy certificates +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-master/redeploy-openshift-ca.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/redeploy-certificates.yml +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml + +# Uninstall OpenShift +# - import_playbook: /usr/share/ansible/openshift-ansible/playbooks/adhoc/uninstall.yml diff --git a/dnsmasq-setup.yml b/dnsmasq-setup.yml new file mode 100644 index 0000000..434d632 --- /dev/null +++ b/dnsmasq-setup.yml @@ -0,0 +1,18 @@ +--- +- name: "add wildcard to dnsmasq config on nodes" + hosts: ocp + tasks: + - name: 'template for the config file' + notify: 'restart dnsmasq' + template: + src: 'dnsmasq-wildcard.conf.j2' + dest: '/etc/dnsmasq.d/dnsmasq-wildcard.conf' + mode: 644 + owner: root + group: root + + handlers: + - name: 'restart dnsmasq' + service: + name: dnsmasq + state: restarted diff --git a/dnsmasq-wildcard.conf.j2 b/dnsmasq-wildcard.conf.j2 new file mode 100644 index 0000000..aad6643 --- /dev/null +++ b/dnsmasq-wildcard.conf.j2 @@ -0,0 +1 @@ +address=/{{ openshift_master_default_subdomain }}/{{ homelab_router_ip }} diff --git a/env.sh b/env.sh index 8e78777..07af827 100755 --- a/env.sh +++ b/env.sh @@ -1,36 +1,24 @@ # See README.md -export DOMAIN="pokitoach.com" +export DOMAIN="example.com" declare -A MACADDRESS=( \ ["jump."$DOMAIN]="52:54:00:42:B4:AD" \ - ["master0."$DOMAIN]="52:54:00:2C:C2:A0" \ ["master1."$DOMAIN]="52:54:00:AC:C6:E1" \ - ["master2."$DOMAIN]="52:54:00:DE:6B:C4" \ - ["node0."$DOMAIN]="52:54:00:96:FF:84" \ ["node1."$DOMAIN]="52:54:00:4A:22:9B" \ ["node2."$DOMAIN]="52:54:00:4A:22:9C" \ - ["node3."$DOMAIN]="52:54:00:4A:22:9D" \ - ["xjump."$DOMAIN]="64:54:00:42:B4:00" \ - ["xmaster0."$DOMAIN]="64:54:00:42:B4:01" \ - ["xnode0."$DOMAIN]="64:54:00:42:B4:02" \ - ["xnode1."$DOMAIN]="64:54:00:42:B4:03" \ - ["xnode2."$DOMAIN]="64:54:00:42:B4:04" \ - ["xnode3."$DOMAIN]="64:54:00:42:B4:05" \ - ["xnode4."$DOMAIN]="64:54:00:42:B4:06" \ - ["xnode5."$DOMAIN]="64:54:00:42:B4:07" \ - ["xnode6."$DOMAIN]="64:54:00:42:B4:08" \ - ["xnode7."$DOMAIN]="64:54:00:42:B4:09" \ - ["xnode8."$DOMAIN]="64:54:00:42:B4:10" \ - ["xnode9."$DOMAIN]="64:54:00:42:B4:11" \ ) -export OCPDOMAIN="hupiper.com" +export OCPDOMAIN="ocp.example.com" export WORKSPACE="$HOME/ocp" -export VMS="$WORKSPACE/VMs" -export ISOS="$HOME/ISOs" -export RHEL_IMAGE="$ISOS/rhel-server-7.5-x86_64-kvm.qcow2" -export 
BRIDGE="LondonBridge" # or virbr0 depending on your needs +export VMS="/vms/ocp/" +export ORIGINALKEY=$HOME/.ssh/id_rsa.pub +export CLUSTERKEY=$WORKSPACE/vm_id_rsa.pub +export ISOS="/isos" +export RHEL_IMAGE="$ISOS/rhel-server-7.6-x86_64-kvm.qcow2" +export BRIDGE="brtwo" # or virbr0 depending on your needs #export BRIDGE="virbr0" export VMRAM_JUMP=8192 -export VMRAM_OCP=24576 -export VMROOTDISK=120G +export VMRAM_OCP=4084 +export VMROOTDISK=60G export VMDOCKERDISK=10G export VMGLUSTERFSDISK=10G +export CPUMODEL=Westmere-IBRS +export INITIALPASSWD=redhat diff --git a/group_vars/OSEv3/auth.yml b/group_vars/OSEv3/auth.yml new file mode 100644 index 0000000..a1d948c --- /dev/null +++ b/group_vars/OSEv3/auth.yml @@ -0,0 +1,10 @@ +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/configuring_clusters/index#install-config-configuring-authentication + +# Configure htpasswd authentication +openshift_master_identity_providers: + - name: htpasswd_auth + login: true + challenge: true + kind: HTPasswdPasswordIdentityProvider +# Define htpasswd users file locally on master node +openshift_master_htpasswd_users: {"admin":"$apr1$S2BW9z31$90mxjme7O/ao8FWuVt9Nt/"} diff --git a/group_vars/OSEv3/crio.yml b/group_vars/OSEv3/crio.yml new file mode 100644 index 0000000..55cc65e --- /dev/null +++ b/group_vars/OSEv3/crio.yml @@ -0,0 +1,16 @@ +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/cri-o_runtime/use-crio-engine#installing-cri-o-with-a-new-openshift-container-platform-cluster + +# For the default Docker setup, keep this configuration commented out + +# Install and run cri-o. +openshift_use_crio: False +# openshift_crio_use_rpm: True + +# The following two variables are used when openshift_use_crio is True +# and cleans up after builds that pass through docker. When openshift_use_crio is True +# these variables are set to the defaults shown. You may override them here. +# NOTE: You will still need to tag crio nodes with your given label(s)! 
+# Enable docker garbage collection when using cri-o +# openshift_crio_enable_docker_gc: True +# Node Selectors to run the garbage collection +# openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'} diff --git a/group_vars/OSEv3/glusterfs.yml b/group_vars/OSEv3/glusterfs.yml new file mode 100644 index 0000000..459014c --- /dev/null +++ b/group_vars/OSEv3/glusterfs.yml @@ -0,0 +1,18 @@ +# Configure GlusterFS for applications +# https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/3.11/html/deployment_guide/chap-Documentation-Red_Hat_Gluster_Storage_Container_Native_with_OpenShift_Platform-Basic_Install + +# Specify GlusterFS image versions +openshift_storage_glusterfs_image: registry.redhat.io/rhgs3/rhgs-server-rhel7:v3.11 +openshift_storage_glusterfs_block_image: registry.redhat.io/rhgs3/rhgs-gluster-block-prov-rhel7:v3.11 +openshift_storage_glusterfs_heketi_image: registry.redhat.io/rhgs3/rhgs-volmanager-rhel7:v3.11 + +# Destroy data on block devices prior to install +openshift_storage_glusterfs_wipe: true + +openshift_storage_glusterfs_namespace: app-storage +openshift_storage_glusterfs_storageclass: true +openshift_storage_glusterfs_storageclass_default: false +openshift_storage_glusterfs_block_deploy: true +openshift_storage_glusterfs_block_host_vol_size: 30 +openshift_storage_glusterfs_block_storageclass: true +openshift_storage_glusterfs_block_storageclass_default: false diff --git a/group_vars/OSEv3/logging.yml b/group_vars/OSEv3/logging.yml new file mode 100644 index 0000000..e5627c6 --- /dev/null +++ b/group_vars/OSEv3/logging.yml @@ -0,0 +1,11 @@ +# Install logging +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/installing_clusters/install-config-configuring-inventory-file#advanced-install-cluster-logging +openshift_logging_install_logging: true +openshift_logging_es_nodeselector: {"node-role.kubernetes.io/infra":"true"} +openshift_logging_kibana_nodeselector: {"node-role.kubernetes.io/infra":"true"} +openshift_logging_curator_nodeselector: {"node-role.kubernetes.io/infra":"true"} + +# Optional tuning for small home labs +openshift_logging_es_cpu_limit: 500m +openshift_logging_es_memory_limit: 1G +openshift_logging_fluentd_memory_limit: 500M diff --git a/group_vars/OSEv3/main.yml b/group_vars/OSEv3/main.yml new file mode 100644 index 0000000..1973a58 --- /dev/null +++ b/group_vars/OSEv3/main.yml @@ -0,0 +1,18 @@ +# Connect to nodes as root user. 
+ansible_ssh_user: root + +# Set deployment type to deploy enterprise as opposed to origin or a stand-alone registry +openshift_deployment_type: openshift-enterprise + +# Disable memory check for small home labs +openshift_disable_check: memory_availability + +# Red Hat recommends firewalld instead of iptables +os_firewall_use_firewalld: true + +# Master configurations +openshift_master_cluster_method: native + +# Install OpenShift templates and service catalog by default +openshift_install_examples: true +openshift_enable_service_catalog: true diff --git a/group_vars/OSEv3/metrics.yml b/group_vars/OSEv3/metrics.yml new file mode 100644 index 0000000..9bbd76f --- /dev/null +++ b/group_vars/OSEv3/metrics.yml @@ -0,0 +1,14 @@ +# Install metrics +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/installing_clusters/install-config-configuring-inventory-file#advanced-install-cluster-metrics +openshift_metrics_install_metrics: true +openshift_metrics_cassandra_nodeselector: {"node-role.kubernetes.io/infra":"true"} +openshift_metrics_hawkular_nodeselector: {"node-role.kubernetes.io/infra":"true"} +openshift_metrics_heapster_nodeselector: {"node-role.kubernetes.io/infra":"true"} + +# Optional tuning for small home labs +openshift_metrics_cassandra_limits_memory: 2G +openshift_metrics_cassandra_requests_memory: 500m +openshift_metrics_hawkular_limits_memory: 2G +openshift_metrics_hawkular_requests_memory: 500m +openshift_metrics_heapster_limits_memory: 2G +openshift_metrics_heapster_requests_memory: 500m diff --git a/group_vars/OSEv3/monitoring.yml b/group_vars/OSEv3/monitoring.yml new file mode 100644 index 0000000..4dff2fa --- /dev/null +++ b/group_vars/OSEv3/monitoring.yml @@ -0,0 +1,4 @@ +# Install Prometheus/Grafana +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/configuring_clusters/index#prometheus-cluster-monitoring +openshift_cluster_monitoring_operator_install: true +openshift_cluster_monitoring_operator_node_selector: {"node-role.kubernetes.io/infra":"true"} diff --git a/group_vars/OSEv3/nodelabels.yml b/group_vars/OSEv3/nodelabels.yml new file mode 100644 index 0000000..a9b8f3a --- /dev/null +++ b/group_vars/OSEv3/nodelabels.yml @@ -0,0 +1,102 @@ +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/installing_clusters/install-config-configuring-inventory-file +openshift_node_groups: + - name: node-config-master + labels: + - 'node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-infra + labels: + - 'node-role.kubernetes.io/infra=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-compute + labels: + - 'node-role.kubernetes.io/compute=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-master-infra + labels: + - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-all-in-one + labels: + - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true,node-role.kubernetes.io/compute=true' + edits: + 
- key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-master-crio + labels: + - 'node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-infra-crio + labels: + - 'node-role.kubernetes.io/infra=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-compute-crio + labels: + - 'node-role.kubernetes.io/compute=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-master-infra-crio + labels: + - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" + - name: node-config-all-in-one-crio + labels: + - 'node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true,node-role.kubernetes.io/compute=true' + edits: + - key: kubeletArguments.image-gc-high-threshold + value: + - "80" + - key: kubeletArguments.image-gc-low-threshold + value: + - "70" diff --git a/group_vars/OSEv3/subscription.yml b/group_vars/OSEv3/subscription.yml new file mode 100644 index 0000000..9bde6b3 --- /dev/null +++ b/group_vars/OSEv3/subscription.yml @@ -0,0 +1,8 @@ +# https://access.redhat.com/solutions/3661111 +# https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/configuring_clusters/install-config-configuring-red-hat-registry#managing-registry-credentials-for-install-upgrade_configuring_red_hat_registry +# you can use your rhn login, or a service account, according to the link above ^ +# populate this with your values +#oreg_url: +# or these: +oreg_auth_user: +oreg_auth_password: diff --git a/hosts b/hosts index 8b6e776..6f1ca1e 100644 --- a/hosts +++ b/hosts @@ -1,3 +1,5 @@ [ocp] -master0.pokitoach.com -node0.pokitoach.com +master1.example.com +node1.example.com +node2.example.com +jump.example.com diff --git a/hosts.3master.example b/hosts.3master.example new file mode 100644 index 0000000..88acb62 --- /dev/null +++ b/hosts.3master.example @@ -0,0 +1,35 @@ +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +lb +etcd + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] + +openshift_master_cluster_hostname=ocp.example.com +openshift_master_cluster_public_hostname=ocp.example.com +openshift_master_default_subdomain=apps.example.com + +# enable ntp on masters to ensure proper failover +openshift_clock_enabled=true + +# host group for masters +[masters] +master1.example.com +master2.example.com +master3.example.com + +# host group for nodes, includes region info +[nodes] +node1.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true +node2.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true + +[lb] +node1.example.com + +[etcd] +master1.example.com +master2.example.com +master3.example.com diff --git a/hosts.3master.ocp b/hosts.3master.ocp deleted file mode 100644 index 75e970e..0000000 --- a/hosts.3master.ocp +++ /dev/null @@ -1,50 +0,0 @@ -# Create an OSEv3 group that contains the masters and nodes 
groups -[OSEv3:children] -masters -nodes -lb -etcd - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# SSH user, this user should allow ssh based auth without requiring a password -ansible_ssh_user=root -openshift_hosted_metrics_deploy=false -openshift_release=v3.6 - -# If ansible_ssh_user is not root, ansible_become must be set to true -#ansible_become=true - -deployment_type=openshift-enterprise - -openshift_master_cluster_hostname=ocp.nozell.com -openshift_master_cluster_public_hostname=ocp.nozell.com -openshift_master_default_subdomain=apps.nozell.com - -# uncomment the following to enable htpasswd authentication; defaults to DenyAllPasswordIdentityProvider -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] - -# enable ntp on masters to ensure proper failover -openshift_clock_enabled=true -openshift_master_cluster_method=native - -# host group for masters -[masters] -master0.gwiki.org -master1.gwiki.org -master2.gwiki.org - -# host group for nodes, includes region info -[nodes] -master0.gwiki.org openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true -master1.gwiki.org openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true -master2.gwiki.org openshift_node_labels="{'region': 'infra', 'zone': 'default'}" openshift_schedulable=true -node0.gwiki.org openshift_node_labels="{'region': 'primary', 'zone': 'east'}" openshift_schedulable=true - -[lb] -node1.gwiki.org - -[etcd] -master0.gwiki.org -master1.gwiki.org -master2.gwiki.org diff --git a/hosts.addnode b/hosts.addnode index fca2b18..8c61009 100644 --- a/hosts.addnode +++ b/hosts.addnode @@ -1,2 +1,2 @@ [jump] -node1.pokitoach.com +jump.example.com diff --git a/hosts.crio.example b/hosts.crio.example new file mode 100644 index 0000000..0045c5d --- /dev/null +++ b/hosts.crio.example @@ -0,0 +1,23 @@ +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +openshift_master_default_subdomain=ocp.example.com + +# host group for masters +[masters] +master1.example.com + +# host group for etcd +[etcd] +master1.example.com + +# host group for nodes +[nodes] +master1.example.com openshift_node_group_name='node-config-master-infra-crio' +node1.example.com openshift_node_group_name='node-config-compute-crio' +node2.example.com openshift_node_group_name='node-config-compute-crio' diff --git a/hosts.jump b/hosts.jump index d5567c4..8c61009 100644 --- a/hosts.jump +++ b/hosts.jump @@ -1,2 +1,2 @@ [jump] -jump.pokitoach.com +jump.example.com diff --git a/hosts.local b/hosts.local index c157a8c..89139db 100644 --- a/hosts.local +++ b/hosts.local @@ -1 +1 @@ -mnemosyne.gwiki.org +lab.example.com diff --git a/hosts.ocp b/hosts.ocp deleted file mode 100644 index 016168c..0000000 --- a/hosts.ocp +++ /dev/null @@ -1,44 +0,0 @@ -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] - -# optional -debug_level=4 - -ansible_ssh_user=root - -openshift_deployment_type=openshift-enterprise -oreg_auth_user= -oreg_auth_password= - -openshift_master_cluster_method=native - -openshift_master_default_subdomain=apps.hupiper.com - -openshift_metrics_install_metrics=true 
-openshift_metrics_hawkular_hostname=hawkular-metrics.{{openshift_master_default_subdomain}}
-openshift_logging_install_logging=true
-
-openshift_enable_service_catalog=true
-
-openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}
-
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
-
-# host group for masters
-[masters]
-master0.pokitoach.com
-
-# host group for etcd
-[etcd]
-master0.pokitoach.com
-
-# host group for nodes
-[nodes]
-master0.pokitoach.com openshift_node_group_name='node-config-master-infra'
-node0.pokitoach.com openshift_node_group_name='node-config-compute'
diff --git a/hosts.ocp.template b/hosts.ocp.template
deleted file mode 100644
index 8b13789..0000000
--- a/hosts.ocp.template
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/hosts.small.example b/hosts.small.example
new file mode 100644
index 0000000..17ce25d
--- /dev/null
+++ b/hosts.small.example
@@ -0,0 +1,23 @@
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+openshift_master_default_subdomain=ocp.example.com
+
+# host group for masters
+[masters]
+master1.example.com
+
+# host group for etcd
+[etcd]
+master1.example.com
+
+# host group for nodes
+[nodes]
+master1.example.com openshift_node_group_name='node-config-master-infra'
+node1.example.com openshift_node_group_name='node-config-compute'
+node2.example.com openshift_node_group_name='node-config-compute'
diff --git a/kill-all.sh b/kill-all.sh
index c31bf6b..7ad1593 100755
--- a/kill-all.sh
+++ b/kill-all.sh
@@ -1,23 +1,65 @@
 #!/bin/bash
-source ./env.sh
+DESTROY=no
+
+usage() {
+    echo "Usage $0:"
+    echo "-h for help"
+    echo "-k to destroy instead of shutdown"
+    echo "-f "
+}
+
+kill_hosts() {
+    if [ -z "$HOSTSFILE" ]; then
+        # set the hosts file to 'hosts' if the user doesn't specify it with -f
+        HOSTSFILE=hosts
+    fi
+
+    if [ ! -r "$HOSTSFILE" ]; then
+        echo "$0: Error - ${HOSTSFILE} is missing."
+        exit 10
+    fi
+
+    # a safer, cleaner loop over the hosts file to read in lines, rather than words
+    grep -E '^[^\[ ]' < "$HOSTSFILE" | while IFS= read -r i
+    do
+        echo "################################## Removing: ${i}"
-for i in `cat hosts|grep -v \\\\[`;
-do
+        BASEIMAGE="${VMS}/${i}-base.qcow2"
+        IMAGE="${VMS}/${i}.qcow2"
+        XMLFILE="${VMS}/${i}.xml"
+        DOCKERDISK="${VMS}/${i}-docker.qcow2"
+        GLUSTERDISK="${VMS}/${i}-glusterfs.qcow2"
-    echo "########################################################################"
+        if [ "$DESTROY" == 'yes' ]; then
+            virsh destroy "$i"
+        else
+            virsh shutdown "$i"
+        fi
-    baseimage="$VMS/$i-base.qcow2"
-    image="$VMS/$i.qcow2"
-    xmlfile="$VMS/$i.xml"
-    dockerdisk="$VMS/$i-docker.qcow2"
+        virsh undefine "$i"
+        rm "$BASEIMAGE" "$IMAGE" "$DOCKERDISK" "$XMLFILE" "$GLUSTERDISK"
+        ssh-keygen -R "$i"
+    done
-    virsh shutdown $i
-    virsh undefine $i
-    rm $baseimage $image $dockerdisk $xmlfile
+    exit 0
+}
+source ./env.sh
+
+while getopts h?k?f: option
+do
+case "${option}"
+in
+h) usage
+   exit 0;;
+f) HOSTSFILE="$OPTARG" ;;
+k) DESTROY=yes ;;
+?) usage
+   exit 1;;
+esac
 done
-virsh list --all
+kill_hosts
-exit
+virsh list --all
diff --git a/network-bridge-setup.sh b/network-bridge-setup.sh
old mode 100644
new mode 100755
index ee4c311..2a78c59
--- a/network-bridge-setup.sh
+++ b/network-bridge-setup.sh
@@ -1,5 +1,5 @@
 sudo nmcli con add type bridge con-name LondonBridge ifname LondonBridge
-sudo nmcli con add type ethernet con-name UK-slave ifname enp6s0 master LondonBridge
+sudo nmcli con add type ethernet con-name UK-slave ifname eno4 master LondonBridge
 sudo nmcli con modify LondonBridge bridge.stp no
 sudo nmcli con up LondonBridge
 sudo nmcli con up UK-slave
diff --git a/post-config.yml b/post-config.yml
new file mode 100644
index 0000000..86c3c51
--- /dev/null
+++ b/post-config.yml
@@ -0,0 +1,6 @@
+- name: Configure dnsmasq to use wildcard DNS
+  hosts: nodes
+  vars:
+    homelab_router_ip: 192.168.1.1
+  roles:
+    - dnsmasq-wildcard
diff --git a/prep-os-for-bastion.yml b/prep-os-for-bastion.yml
index 47c4c17..c5f1aab 100644
--- a/prep-os-for-bastion.yml
+++ b/prep-os-for-bastion.yml
@@ -45,7 +45,7 @@
       name: '*'
      state: latest
-  - name: "Install required packages"
+  - name: "Ansible and scripts to drive the openshift installation plus other packages"
    yum: name={{ item }} state=present
    with_items:
    - wget
@@ -59,13 +59,6 @@
    - sos
    - psacct
    - yum-utils
-   - openshift-ansible
-   - docker
-
-
- - name: "Ansible and scripts to drive the openshift installation"
-   yum: name={{ item }} state=present
-   with_items:
    - openshift-ansible
    - ansible
diff --git a/prep-os-for-ocp.yml b/prep-os-for-ocp.yml
index 65a03a2..4c03392 100644
--- a/prep-os-for-ocp.yml
+++ b/prep-os-for-ocp.yml
@@ -60,7 +60,6 @@
    - sos
    - psacct
    - yum-utils
-   - openshift-ansible
    - docker
@@ -95,12 +94,6 @@
 #   with_items:
 #     - emacs-nox
- - name: "Ansible and scripts to drive the openshift installation"
-   yum: name={{ item }} state=present
-   with_items:
-   - openshift-ansible
-   - ansible
-
 # - name: "Install Docker"
 #   yum:
 #     name: docker-1.13.1
diff --git a/roles/dnsmasq-wildcard/defaults/main.yml b/roles/dnsmasq-wildcard/defaults/main.yml
new file mode 100644
index 0000000..0f5cf3e
--- /dev/null
+++ b/roles/dnsmasq-wildcard/defaults/main.yml
@@ -0,0 +1 @@
+homelab_router_ip: 192.168.1.1
diff --git a/roles/dnsmasq-wildcard/handlers/main.yml b/roles/dnsmasq-wildcard/handlers/main.yml
new file mode 100644
index 0000000..de1fe55
--- /dev/null
+++ b/roles/dnsmasq-wildcard/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: 'restart dnsmasq'
+  service:
+    name: dnsmasq
+    state: restarted
diff --git a/roles/dnsmasq-wildcard/tasks/main.yml b/roles/dnsmasq-wildcard/tasks/main.yml
new file mode 100644
index 0000000..9661260
--- /dev/null
+++ b/roles/dnsmasq-wildcard/tasks/main.yml
@@ -0,0 +1,8 @@
+- name: Add wildcard to dnsmasq config on nodes
+  notify: 'restart dnsmasq'
+  template:
+    src: 'dnsmasq-wildcard.conf.j2'
+    dest: '/etc/dnsmasq.d/dnsmasq-wildcard.conf'
+    mode: 644
+    owner: root
+    group: root
diff --git a/roles/dnsmasq-wildcard/templates/dnsmasq-wildcard.conf.j2 b/roles/dnsmasq-wildcard/templates/dnsmasq-wildcard.conf.j2
new file mode 100644
index 0000000..aad6643
--- /dev/null
+++ b/roles/dnsmasq-wildcard/templates/dnsmasq-wildcard.conf.j2
@@ -0,0 +1 @@
+address=/{{ openshift_master_default_subdomain }}/{{ homelab_router_ip }}
diff --git a/start-all.sh b/start-all.sh
index af81d05..11cc266 100755
--- a/start-all.sh
+++ b/start-all.sh
@@ -1,8 +1,47 @@
 #!/bin/bash
-for i in `cat hosts|grep -v \\\\[`;
-do
-    virsh start $i
-    sleep 5
+usage() {
+    echo "$0:"
+    echo "-h for help"
+    echo "-f "
+    exit 0
+}
+
+start_hosts() {
+    if [ -z "$1" ]; then
+        # set the hosts file to 'hosts' if the user doesn't specify it with -f
+        HOSTSFILE=hosts
+    else
+        HOSTSFILE=$1
+    fi
+
+    if [ ! -r "$HOSTSFILE" ]; then
+        echo "${HOSTSFILE} is missing."
+        exit 10
+    fi
+
+    # a safer, cleaner loop over the hosts file to read in lines, rather than words
+    grep -E '^[^\[ ]' < "$HOSTSFILE" | while IFS= read -r i
+    do
+        virsh start "$i"
+        #sleep 5
+    done
+
+    echo "DEBUG: start_hosts end"
+    exit 0
+}
+
+while getopts h?f: option
+do
+case "${option}"
+in
+h) usage
+   exit 0;;
+f) start_hosts "$OPTARG" ;;
+?) echo "unexpected flag: '${OPTARG}'"
+   exit;;
+esac
 done
+start_hosts
+
diff --git a/stop-all.sh b/stop-all.sh
index 460ad91..9cd8abe 100755
--- a/stop-all.sh
+++ b/stop-all.sh
@@ -1,7 +1,51 @@
 #!/bin/bash
-for i in `cat hosts|grep -v \\\\[`;
-do
-    virsh shutdown $i
+DESTROY=no
+
+usage() {
+    echo "Usage $0:"
+    echo "-h for help"
+    echo "-k to destroy instead of shutdown"
+    echo "-f "
+}
+
+stop_hosts() {
+    if [ -z "$HOSTSFILE" ]; then
+        # set the hosts file to 'hosts' if the user doesn't specify it with -f
+        HOSTSFILE=hosts
+    fi
+
+    if [ ! -r "$HOSTSFILE" ]; then
+        echo "${HOSTSFILE} is missing."
+        exit 10
+    fi
+
+    # a safer, cleaner loop over the hosts file to read in lines, rather than words
+    grep -E '^[^\[ ]' < "$HOSTSFILE" | while IFS= read -r i
+    do
+        if [ "$DESTROY" == 'yes' ]; then
+            virsh destroy "$i"
+        else
+            virsh shutdown "$i"
+        fi
+    done
+
+    exit 0
+}
+
+while getopts h?k?f: option
+do
+case "${option}"
+in
+h) usage
+   exit 0;;
+f) HOSTSFILE="$OPTARG" ;;
+k) DESTROY=yes;;
+?) usage
+   exit 1;;
+esac
 done
+stop_hosts
+
+virsh list --all
diff --git a/variables.yml b/variables.yml
index 0faec49..0eddfad 100644
--- a/variables.yml
+++ b/variables.yml
@@ -4,4 +4,4 @@
 ## subscription-manager list --all --available --matches "*openshift*"
 ##
 ## This is tied to my Red Hat subscription, so it won't work for you.
-openshift_subscription_pool: 8a85f9833e1404a9013e3cddf95a0599
+openshift_subscription_pool: 8a85f9813cf493fe013d028b6cf75b5a
diff --git a/vault.yml b/vault.yml
index 8cc9d6b..4282c6f 100644
--- a/vault.yml
+++ b/vault.yml
@@ -1,9 +1,9 @@
 $ANSIBLE_VAULT;1.1;AES256
-38626261663930323066396435353563386231373830306439616331353938306261613638353134
-6231326438323932333131653162383537636633326332300a653631306266356364303337613866
-35636236383835323934643537393338313764336535613766336339376237323532636365376438
-3038626461383462300a393266623537656562306138323262376361383563363935616534633533
-65316531313764366432616636363232623739396130343066616663326637656534336237363431
-63373161643936336235343761643564646337333330333333313734346634646535633439393533
-38373734616362373862303164343563343037323939376333363062316136623331643230363362
-66306631386234653931
+61663135633033666636383533313033316365353830653563363431373839383637306432336161
+3866633736643130313536326233646564306430613065610a346532346537666334393763646663
+63373833366530393165373061303861623063373633366166323337616137646334636636663135
+3138633233623864350a323530393935316664366536363863323466633037646165353335653935
+63306630353166353335373033393433363737376636323230313433666266333765623232643838
+36623265646238653234333138313430663037383630646463393038613337313062313663316533
+34356434323436313134346236613935333564383936623332333838303462646131363130383135
+31303831343461386632