diff --git a/prepare-pks/README.md b/prepare-pks/README.md
new file mode 100644
index 000000000..688c27945
--- /dev/null
+++ b/prepare-pks/README.md
@@ -0,0 +1,34 @@
+# Instructions for preparing a PKS Kubernetes Cluster
+
+## Prerequisites
+
+* an Ingress controller (NGINX or NSX-T)
+* Gangway (or something similar to issue kubeconfig files)
+
+## Create users
+
+This example creates 50 random users in UAAC, plus a namespace and RBAC rules for each of them in Kubernetes. The generated credentials are written to `users.txt`, which the other scripts read:
+
+```bash
+$ cd users
+$ ./random-users.sh 50 > users.txt
+$ ./create.sh
+...
+...
+```
+
+This installs Helm's Tiller in each user's namespace:
+
+```bash
+$ ./helm.sh
+...
+...
+```
+
+This cleans everything up:
+
+```bash
+$ ./delete.sh
+...
+...
+```
diff --git a/prepare-pks/users/create.sh b/prepare-pks/users/create.sh
new file mode 100755
index 000000000..98523fe61
--- /dev/null
+++ b/prepare-pks/users/create.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+while IFS=, read -r col1 col2
+do
+  echo "--> Adding user $col1 with password $col2"
+  echo "====> UAAC"
+  uaac user add "$col1" --emails "$col1@pks" -p "$col2"
+  echo "====> Kubernetes"
+  sed "s/__username__/$col1/" user-role-etc.yaml | kubectl apply -f -
+done < users.txt
diff --git a/prepare-pks/users/delete.sh b/prepare-pks/users/delete.sh
new file mode 100755
index 000000000..6fff38f54
--- /dev/null
+++ b/prepare-pks/users/delete.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+while IFS=, read -r col1 col2
+do
+  echo "--> Deleting user $col1 with password $col2"
+  echo "====> UAAC"
+  uaac user delete "$col1"
+  echo "====> Kubernetes"
+  sed "s/__username__/$col1/" user-role-etc.yaml | kubectl delete -f -
+done < users.txt
\ No newline at end of file
diff --git a/prepare-pks/users/helm.sh b/prepare-pks/users/helm.sh
new file mode 100755
index 000000000..cdda4ede1
--- /dev/null
+++ b/prepare-pks/users/helm.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+while IFS=, read -r col1 col2
+do
+
+kubectl -n $col1 create serviceaccount tiller
+
+kubectl -n $col1 create role tiller --verb '*' --resource '*'
+
+kubectl -n $col1 create rolebinding tiller --role tiller --serviceaccount ${col1}:tiller
+
+# The ClusterRole is shared; re-creating it on later iterations fails harmlessly.
+kubectl create clusterrole ns-tiller --verb 'get,list' --resource namespaces
+
+# The ClusterRoleBinding needs a unique name per user; otherwise every
+# iteration after the first would fail with "already exists".
+kubectl create clusterrolebinding tiller-$col1 --clusterrole ns-tiller --serviceaccount ${col1}:tiller
+
+helm init --service-account=tiller --tiller-namespace=$col1
+
+kubectl -n $col1 delete service tiller-deploy
+
+kubectl -n $col1 patch deployment tiller-deploy --patch '
+spec:
+  template:
+    spec:
+      containers:
+      - name: tiller
+        ports: []
+        command: ["/tiller"]
+        args: ["--listen=localhost:44134"]
+'
+
+done < users.txt
diff --git a/prepare-pks/users/random-users.sh b/prepare-pks/users/random-users.sh
new file mode 100755
index 000000000..e299afc19
--- /dev/null
+++ b/prepare-pks/users/random-users.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [[ -z $1 ]]; then
+  echo "Usage: ./random-users.sh NUMBER_OF_USERS"
+  exit 1
+fi
+
+for i in $(seq 1 "$1"); do
+  PW=$(tr -dc 'a-zA-Z1-9' < /dev/urandom | fold -w 10 | head -n 1)
+  echo "user$i,$PW"
+done
diff --git a/prepare-pks/users/user-role-etc.yaml b/prepare-pks/users/user-role-etc.yaml
new file mode 100644
index 000000000..0b5dc75cc
--- /dev/null
+++ b/prepare-pks/users/user-role-etc.yaml
@@ -0,0 +1,57 @@
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: __username__
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbac-user-namespace
+rules:
+- apiGroups: ["", "extensions", "apps", "batch", "autoscaling", "networking.k8s.io"]
+  resources: ["*"]
+  verbs: ["*"]
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbac-user-cluster
+rules:
+- apiGroups: ["", "extensions", "apps"]
+  resources: ["*"]
+  verbs: ["list"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["list","get"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+  resources: ["*"]
+  verbs: ["list"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: __username__
+  namespace: __username__
+subjects:
+- kind: User
+  name: __username__
+  apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: rbac-user-namespace
+  apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  # ClusterRoleBindings are cluster-scoped, so there is no namespace here
+  name: __username__
+subjects:
+- kind: User
+  name: __username__
+  apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: rbac-user-cluster
+  apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/prepare-pks/users/users.txt b/prepare-pks/users/users.txt
new file mode 100644
index 000000000..450cdc67d
--- /dev/null
+++ b/prepare-pks/users/users.txt
@@ -0,0 +1,2 @@
+user1,user1-password
+user2,user2-password
diff --git a/slides/images/you-get-a-namespace.jpg b/slides/images/you-get-a-namespace.jpg
new file mode 100644
index 000000000..e4a126921
Binary files /dev/null and b/slides/images/you-get-a-namespace.jpg differ
diff --git a/slides/k8s/kubectlexpose.md b/slides/k8s/kubectlexpose.md
index 0d3bec752..3d475ae29 100644
--- a/slides/k8s/kubectlexpose.md
+++ b/slides/k8s/kubectlexpose.md
@@ -221,6 +221,8 @@
 
 - We will now send a few HTTP requests to our pods
 
+- But first, we need to send them from *inside* the cluster. We'll explain why later.
+
 .exercise[
 
 - Let's obtain the IP address that was allocated for our service, *programmatically:*
@@ -234,24 +236,50 @@
   ```key ^C```
   -->
 
+- Run a Pod that we can connect to and run shell commands:
+  ```bash
+  kubectl run shpod --image=jpetazzo/shpod --restart=Never -- -c "sleep 14400"
+  ```
+]
+
+--
+
+This Pod will live for 14400 seconds (4 hours) before exiting, which means we can re-use it throughout the workshop.
+
+---
+
+## Testing our service
+
+- *Now* we can send a few HTTP requests to our Pods
+
+.exercise[
+
 - Send a few requests:
   ```bash
-  curl http://$IP:8888/
+  kubectl exec shpod -- curl -s http://$IP:8888/
   ```
 
 - Too much output? Filter it with `jq`:
   ```bash
-  curl -s http://$IP:8888/ | jq .HOSTNAME
+  kubectl exec shpod -- curl -s http://$IP:8888/ | jq -r .HOSTNAME
+  ```
+
+- Loop it 5 times:
+  ```bash
+  for i in {1..5}; do
+    kubectl exec shpod -- curl -s http://$IP:8888/ | jq -r .HOSTNAME;
+  done
   ```
 
 ]
 
 --
 
-Try it a few times! Our requests are load balanced across multiple pods.
+Our requests are load balanced across multiple pods.
 
 ---
+
 class: extra-details
 
 ## `ExternalName`
 
@@ -407,7 +435,7 @@ class: extra-details
 
 - This is the internal DNS server that can resolve service names
 
-- The default domain name for the service we created is `default.svc.cluster.local`
+- The default domain name for the service we created is `default.svc.cluster.local` (unless you deployed to a namespace other than `default`)
 
 .exercise[
 
 - Resolve the cluster IP for the `httpenv` service:
   ```bash
-  host httpenv.default.svc.cluster.local $IP
+  kubectl exec shpod -- nslookup httpenv $IP
   ```
 
 ]
 
+---
+
+## Accessing services via DNS
+
+* When accessing `httpenv` from another Pod, you can use its DNS name: `httpenv`, `httpenv.<namespace>`, or `httpenv.<namespace>.svc.cluster.local` (see below).
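+
+Short names like `httpenv` work because each Pod's `/etc/resolv.conf` gets DNS search domains for its own namespace. A quick way to see them (a sketch; it assumes the `shpod` image ships the usual core utilities):
+
+  ```bash
+  kubectl exec shpod -- cat /etc/resolv.conf
+  ```
+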
+.exercise[
+
+- Curl the service by name:
+  ```bash
+  kubectl exec shpod -- curl -s http://httpenv:8888/ | jq -r .HOSTNAME
+  ```
+
+- Curl the service by FQDN:
+  ```bash
+  NS=$(kubectl get svc httpenv -o go-template --template '{{ .metadata.namespace }}')
+
+  kubectl exec shpod -- curl -s http://httpenv.$NS.svc.cluster.local:8888/ | \
+    jq -r .HOSTNAME
+  ```
+]
+
 
 ---
 
 class: extra-details
diff --git a/slides/k8s/kubectlget.md b/slides/k8s/kubectlget.md
index c5ba158f1..4ec1d01e1 100644
--- a/slides/k8s/kubectlget.md
+++ b/slides/k8s/kubectlget.md
@@ -214,10 +214,14 @@ class: extra-details
 
 .exercise[
 
-- Look at the information available for `node1` with one of the following commands:
+- Look at the information available for all nodes with one of the following commands:
   ```bash
-  kubectl describe node/node1
-  kubectl describe node node1
+  kubectl describe nodes
+  ```
+
+- Look at just one node, using a node name from the previous `kubectl get nodes` command:
+  ```
+  kubectl describe node <node-name>
+  ```
 
 ]
 
@@ -358,6 +362,8 @@ class: extra-details
 
 ## What about `kube-public`?
 
+> _Not all clusters have a `kube-public` namespace; skip these steps if yours does not._
+
 .exercise[
 
 - List the pods in the `kube-public` namespace:
@@ -377,6 +383,8 @@ class: extra-details
 
 ## Exploring `kube-public`
 
+> _Not all clusters have a `kube-public` namespace; skip these steps if yours does not._
+
 - The only interesting object in `kube-public` is a ConfigMap named `cluster-info`
 
 .exercise[
 
@@ -403,6 +411,8 @@ class: extra-details
 
 ## Accessing `cluster-info`
 
+> _Not all clusters have a `kube-public` namespace; skip these steps if yours does not._
+
 - Earlier, when trying to access the API server, we got a `Forbidden` message
 
 - But `cluster-info` is readable by everyone (even without authentication)
 
@@ -426,6 +436,8 @@ class: extra-details
 
 ## Retrieving `kubeconfig`
 
+> _Not all clusters have a `kube-public` namespace; skip these steps if yours does not._
+
 - We can easily extract the `kubeconfig` file from this ConfigMap
 
 .exercise[
 
@@ -475,10 +487,10 @@ class: extra-details
 
 .exercise[
 
-- List the services on our cluster with one of these commands:
+- List the services in our default namespace with one of these commands:
   ```bash
-  kubectl get services
-  kubectl get svc
+  kubectl -n default get services
+  kubectl -n default get svc
   ```
 
 ]
 
diff --git a/slides/k8s/labels-annotations.md b/slides/k8s/labels-annotations.md
index 790841baa..bd8267ab6 100644
--- a/slides/k8s/labels-annotations.md
+++ b/slides/k8s/labels-annotations.md
@@ -44,7 +44,7 @@ So, what do we get?
 
 - We see one label:
   ```
-  Labels: app=clock
+  Labels: app=web
   ```
 
 - This is added by `kubectl create deployment`
 
@@ -71,7 +71,7 @@ So, what do we get?
 
 - Display its information:
   ```bash
-  kubectl describe pod clock-xxxxxxxxxx-yyyyy
+  kubectl describe pod web-xxxxxxxxxx-yyyyy
   ```
 
 ]
 
@@ -84,11 +84,11 @@ So, what do we get?
 
 - We see two labels:
   ```
-  Labels: app=clock
+  Labels: app=web
           pod-template-hash=xxxxxxxxxx
   ```
 
-- `app=clock` comes from `kubectl create deployment` too
+- `app=web` comes from `kubectl create deployment` too
 
 - `pod-template-hash` was assigned by the Replica Set
 
@@ -109,9 +109,9 @@ So, what do we get?
.exercise[ -- List all the pods with at least `app=clock`: +- List all the pods with at least `app=web`: ```bash - kubectl get pods --selector=app=clock + kubectl get pods --selector=app=web ``` - List all the pods with a label `app`, regardless of its value: @@ -129,14 +129,14 @@ So, what do we get? .exercise[ -- Set a label on the `clock` Deployment: +- Set a label on the `web` Deployment: ```bash - kubectl label deployment clock color=blue + kubectl label deployment web color=blue ``` - Check it out: ```bash - kubectl describe deployment clock + kubectl describe deployment web ``` ] @@ -155,7 +155,7 @@ class: extra-details We can also use negative selectors - Example: `--selector=app!=clock` + Example: `--selector=app!=web` - Selectors can be used with most `kubectl` commands @@ -196,7 +196,19 @@ class: extra-details (dozens of kilobytes is fine, hundreds maybe not so much) -??? +--- + +## Cleanup web deployment + +- Time to clean up web and move on + +.exercise[ + + - delete the web deployment + ```bash + kubectl delete deployment web + ``` +] :EN:- Labels and annotations :FR:- *Labels* et annotations diff --git a/slides/k8s/logs-cli.md b/slides/k8s/logs-cli.md index 288d929ad..fdeeee9a5 100644 --- a/slides/k8s/logs-cli.md +++ b/slides/k8s/logs-cli.md @@ -150,6 +150,19 @@ Exactly what we need! ] +--- + +## Cleanup ping pong deployment + +- Time to clean up pingpong and move on + +.exercise[ + + - delete the pingpong deployment + ```bash + kubectl delete deployment pingpong + ``` +] ??? :EN:- Viewing pod logs from the CLI diff --git a/slides/k8s/scalingdockercoins.md b/slides/k8s/scalingdockercoins.md index 404db95c4..9475a27ab 100644 --- a/slides/k8s/scalingdockercoins.md +++ b/slides/k8s/scalingdockercoins.md @@ -158,28 +158,9 @@ class: extra-details ``` httping ip.ad.dr.ess ``` +-- -- We will use `httping` on the ClusterIP addresses of our services - ---- - -## Obtaining ClusterIP addresses - -- We can simply check the output of `kubectl get services` - -- Or do it programmatically, as in the example below - -.exercise[ - -- Retrieve the IP addresses: - ```bash - HASHER=$(kubectl get svc hasher -o go-template={{.spec.clusterIP}}) - RNG=$(kubectl get svc rng -o go-template={{.spec.clusterIP}}) - ``` - -] - -Now we can access the IP addresses of our services through `$HASHER` and `$RNG`. +We can use the `shpod` we started earlier to run `httping` on the ClusterIP addresses of our services. That way we don't need to expose them to the internet. --- @@ -189,8 +170,8 @@ Now we can access the IP addresses of our services through `$HASHER` and `$RNG`. - Check the response times for both services: ```bash - httping -c 3 $HASHER - httping -c 3 $RNG + kubectl exec -ti shpod -- httping -c 3 hasher + kubectl exec -ti shpod -- httping -c 3 rng ``` ] diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml new file mode 100644 index 000000000..1a5947a1b --- /dev/null +++ b/slides/kube-fullday-namespaced.yml @@ -0,0 +1,128 @@ +title: | + Deploying and Scaling Microservices + with Kubernetes + +#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" +#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" +chat: "In person!" 
+ +gitrepo: github.com/jpetazzo/container.training + +slides: http://container.training/ + +hosts: + - name: "Ann O'Nymous" + emoji: "πŸ‘©πŸ»β€πŸ«" + company: "Megacorp Inc" + twitter: "annonypotomus" + - name: "Stu Dent" + emoji: "πŸ‘¨πŸΎβ€πŸŽ“" + company: "University of Wakanda" + twitter: "stufromwakanda" + +logistics: |- + - logistics: + - The workshop will run from 9:00am + - There will be a lunch break at 12:00pm + - Plus coffee breaks :) + + +#slidenumberprefix: "#SomeHashTag — " + +exclude: +- self-paced + +content: +- shared/title.md +- logistics.md +- k8s/intro.md +- shared/about-slides.md +- shared/chat-room-im.md +#- shared/chat-room-zoom.md +- shared/toc.md +- + - shared/prereqs.md + - namespaced/handson.md + #- shared/webssh.md + - namespaced/connecting.md + #- k8s/versions-k8s.md + - namespaced/sampleapp.md + #- shared/composescale.md + #- shared/hastyconclusions.md + - shared/composedown.md + - k8s/concepts-k8s.md + - k8s/kubectlget.md +- + - k8s/kubectl-run.md + - k8s/batch-jobs.md + - k8s/labels-annotations.md + - k8s/kubectl-logs.md + - k8s/logs-cli.md + - shared/declarative.md + - k8s/declarative.md + - k8s/deploymentslideshow.md + - k8s/kubenet.md + - k8s/kubectlexpose.md + - k8s/shippingimages.md + #- k8s/buildshiprun-selfhosted.md + - k8s/buildshiprun-dockerhub.md + - namespaced/ourapponkube.md + #- k8s/exercise-wordsmith.md +- + - k8s/yamldeploy.md + # - k8s/setup-k8s.md + #- k8s/dashboard.md + #- k8s/kubectlscale.md + - k8s/scalingdockercoins.md + - shared/hastyconclusions.md + - k8s/daemonset.md + #- k8s/dryrun.md + #- k8s/exercise-yaml.md + #- k8s/localkubeconfig.md + #- k8s/accessinternal.md + #- k8s/kubectlproxy.md + - k8s/rollout.md + #- k8s/healthchecks.md + #- k8s/healthchecks-more.md + #- k8s/record.md +- + # TODO - Update namespaces section to explain, but not do excercises + # as user will not have permissions to create ns. + # - k8s/namespaces.md + # TODO - Update to show nginx or generic Ingress vs Traefik specific. 
+ # - k8s/ingress.md + #- k8s/kustomize.md + # - k8s/helm-intro.md + # - k8s/helm-chart-format.md + # - k8s/helm-create-basic-chart.md + # - k8s/helm-create-better-chart.md + # - k8s/helm-secrets.md + # - k8s/exercise-helm.md + # - k8s/create-chart.md + # - k8s/create-more-charts.md + #- k8s/netpol.md + #- k8s/authn-authz.md + #- k8s/csr-api.md + #- k8s/openid-connect.md + #- k8s/podsecuritypolicy.md + - k8s/volumes.md + #- k8s/exercise-configmap.md + #- k8s/build-with-docker.md + #- k8s/build-with-kaniko.md + - k8s/configuration.md + #- k8s/logs-centralized.md + #- k8s/prometheus.md + #- k8s/statefulsets.md + #- k8s/local-persistent-volumes.md + #- k8s/portworx.md + #- k8s/extending-api.md + #- k8s/operators.md + #- k8s/operators-design.md + #- k8s/staticpods.md + #- k8s/owners-and-dependents.md + #- k8s/gitworkflows.md +- + - k8s/whatsnext.md + - k8s/lastwords.md + - k8s/links.md + - shared/thankyou.md diff --git a/slides/kube-fullday.yml b/slides/kube-fullday.yml index a4eb6917f..b19836e54 100644 --- a/slides/kube-fullday.yml +++ b/slides/kube-fullday.yml @@ -26,6 +26,7 @@ content: - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md #- k8s/versions-k8s.md diff --git a/slides/kube-halfday.yml b/slides/kube-halfday.yml index 0752d87b3..269f1a522 100644 --- a/slides/kube-halfday.yml +++ b/slides/kube-halfday.yml @@ -27,6 +27,7 @@ content: #- shared/chat-room-zoom-webinar.md - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md - k8s/versions-k8s.md diff --git a/slides/kube-twodays.yml b/slides/kube-twodays.yml index a3e9a824c..4dfa33102 100644 --- a/slides/kube-twodays.yml +++ b/slides/kube-twodays.yml @@ -26,6 +26,7 @@ content: - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md #- k8s/versions-k8s.md diff --git a/slides/logistics-template.md b/slides/logistics-template.md index 33388a069..2d03bd089 100644 --- a/slides/logistics-template.md +++ b/slides/logistics-template.md @@ -3,13 +3,13 @@ - This slide should be customized by the tutorial instructor(s). - Hello! We are: +@@HOSTS@@ + -- The workshop will run from ... - -- There will be a lunch break at ... - - (And coffee breaks!) +@@LOGISTICS@@ - Feel free to interrupt for questions at any time diff --git a/slides/markmaker.py b/slides/markmaker.py index 5e2b20caa..1135d91ef 100755 --- a/slides/markmaker.py +++ b/slides/markmaker.py @@ -98,6 +98,11 @@ def generatefromyaml(manifest, filename): if "html" not in manifest: manifest["html"] = filename + ".html" + if "logistics" not in manifest: + logistics = "\n- The workshop will run from ...\n- There will be a lunch break at ...\n- Plus coffee breaks!" + logging.warning("logistics not found, using default - {}".format(logistics)) + manifest["logistics"] = logistics + markdown, titles = processcontent(manifest["content"], filename) logging.debug("Found {} titles.".format(len(titles))) toc = gentoc(titles) @@ -111,6 +116,20 @@ def generatefromyaml(manifest, filename): logging.warning("'exclude' is empty.") exclude = ",".join('"{}"'.format(c) for c in exclude) + hosts = manifest.get("hosts", []) + logging.debug("hosts={!r}".format(hosts)) + if not hosts: + logging.warning("'hosts' is empty. Using defaults.") + host_html = "
    \n
  • πŸ‘©πŸ»β€πŸ« Ann O'Nymous @annonypotomus, Megacorp Inc\n
  • πŸ‘¨πŸΎβ€πŸŽ“ Stu Dent @stufromwakanda, University of Wakanda\n
" + else: + host_html = "
    \n" + for host in flatten(hosts): + logging.debug("host: {}".format(host["name"])) + # host_html += "
  • {}\n".format(host["name"]) + host_html += "
  • {} {} @{}, {}\n".format(host["emoji"], host["name"], host["twitter"], host["company"])
+        host_html += "
\n" + logging.debug("host_html: {}".format(host_html)) + # Insert build info. This is super hackish. markdown = markdown.replace( @@ -123,6 +142,8 @@ def generatefromyaml(manifest, filename): html = open("workshop.html").read() html = html.replace("@@MARKDOWN@@", markdown) html = html.replace("@@EXCLUDE@@", exclude) + html = html.replace("@@HOSTS@@", host_html) + html = html.replace("@@LOGISTICS@@", manifest["logistics"]) html = html.replace("@@CHAT@@", manifest["chat"]) html = html.replace("@@GITREPO@@", manifest["gitrepo"]) html = html.replace("@@SLIDES@@", manifest["slides"]) @@ -206,6 +227,8 @@ def processcontent(content, filename): else: repo = subprocess.check_output(["git", "config", "remote.origin.url"]).decode("ascii") repo = repo.strip().replace("git@github.com:", "https://github.com/") + regex = re.compile('\.git$') + repo = regex.sub("", repo) if "BRANCH" in os.environ: branch = os.environ["BRANCH"] else: diff --git a/slides/namespaced/connecting.md b/slides/namespaced/connecting.md new file mode 100644 index 000000000..a75555c78 --- /dev/null +++ b/slides/namespaced/connecting.md @@ -0,0 +1,106 @@ +class: in-person + +## Connecting to our lab environment + +.exercise[ + +- Log into the provided URL with your provided credentials. + +- Follow the instructions on the auth portal to set up a `kubeconfig` file. + +- Check that you can connect to the cluster with `kubectl cluster-info`: + +```bash +$ kubectl cluster-info +Kubernetes master is running at https://k8s.cluster1.xxxx:8443 +CoreDNS is running at https://k8s.cluster1.xxxx:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy +``` +] + +If anything goes wrong β€” ask for help! + +--- + +## Role Based Authorization Control + +You are restricted to a subset of Kubernetes resources in your own namespace. Just like in a real world enterprise cluster. + + +.exercise[ + +1\. Can you create pods? + +``` +$ kubectl auth can-i create pods +``` + +2\. Can you delete namespaces? + +``` +$ kubectl auth can-i delete namespaces +``` +] +-- + +1. You can create pods in your own namespace. +2. You cannot delete namespaces. +--- + +## Doing or re-doing the workshop on your own? + +- Use something like + [Play-With-Docker](http://play-with-docker.com/) or + [Play-With-Kubernetes](https://training.play-with-kubernetes.com/) + + Zero setup effort; but environment are short-lived and + might have limited resources + +- Create your own cluster (local or cloud VMs) + + Small setup effort; small cost; flexible environments + +- Create a bunch of clusters for you and your friends + ([instructions](https://@@GITREPO@@/tree/master/prepare-vms)) + + Bigger setup effort; ideal for group training + +--- + +class: self-paced + +## Get your own Docker nodes + +- If you already have some Docker nodes: great! + +- If not: let's get some thanks to Play-With-Docker + +.exercise[ + +- Go to http://www.play-with-docker.com/ + +- Log in + +- Create your first node + + + +] + +You will need a Docker ID to use Play-With-Docker. + +(Creating a Docker ID is free.) + +--- + +## Terminals + +Once in a while, the instructions will say: +
"Open a new terminal." + +There are multiple ways to do this: + +- create a new window or tab on your machine, and SSH into the VM; + +- use screen or tmux on the VM and open a new window from there. + +You are welcome to use the method that you feel the most comfortable with. diff --git a/slides/namespaced/handson.md b/slides/namespaced/handson.md new file mode 100644 index 000000000..89abef0f6 --- /dev/null +++ b/slides/namespaced/handson.md @@ -0,0 +1,136 @@ +## Hands-on sections + +- The whole workshop is hands-on + +- We are going to build, ship, and run containers! + +- You are invited to reproduce all the demos + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person, pic + +![You get a namespace](images/you-get-a-namespace.jpg) + +--- + +class: in-person + +## You get your own namespace + +- We have one big Kubernetes cluster + +- Each person gets a private namespace (not shared with anyone else) + +- They'll remain up for the duration of the workshop + +- You should have a little card with login+password+IP addresses + +- The namespace is the same as your login and should be set automatically when you log in. + + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - an SSH client + +--- + +class: in-person + +## SSH clients + +_If needed_ + +- On Linux, OS X, FreeBSD... you are probably all set + +- On Windows, get one of these: + + - [putty](http://www.putty.org/) + - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) + - [Git BASH](https://git-for-windows.github.io/) + - [MobaXterm](http://mobaxterm.mobatek.net/) + +- On Android, [JuiceSSH](https://juicessh.com/) + ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) + works pretty well + +- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets + +--- + +class: in-person, extra-details + +## What is this Mosh thing? + +*You don't have to use Mosh or even know about it to follow along. +
+We're just telling you about it because some of us think it's cool!* + +- Mosh is "the mobile shell" + +- It is essentially SSH over UDP, with roaming features + +- It retransmits packets quickly, so it works great even on lossy connections + + (Like hotel or conference WiFi) + +- It has intelligent local echo, so it works great even in high-latency connections + + (Like hotel or conference WiFi) + +- It supports transparent roaming when your client IP address changes + + (Like when you hop from hotel to conference WiFi) + +--- + +class: in-person, extra-details + +## Using Mosh + +- To install it: `(apt|yum|brew) install mosh` + +- It has been pre-installed on the VMs that we are using + +- To connect to a remote machine: `mosh user@host` + + (It is going to establish an SSH connection, then hand off to UDP) + +- It requires UDP ports to be open + + (By default, it uses a UDP port between 60000 and 61000) diff --git a/slides/namespaced/ourapponkube.md b/slides/namespaced/ourapponkube.md new file mode 100644 index 000000000..00d9d70a4 --- /dev/null +++ b/slides/namespaced/ourapponkube.md @@ -0,0 +1,162 @@ +# Running our application on Kubernetes + +- We can now deploy our code (as well as a redis instance) + +.exercise[ + +- Deploy `redis`: + ```bash + kubectl create deployment redis --image=redis + ``` + +- Deploy everything else: + ```bash + kubectl create deployment hasher --image=dockercoins/hasher:v0.1 + kubectl create deployment rng --image=dockercoins/rng:v0.1 + kubectl create deployment webui --image=dockercoins/webui:v0.1 + kubectl create deployment worker --image=dockercoins/worker:v0.1 + ``` + +] + +--- + +class: extra-details + +## Deploying other images + +- If we wanted to deploy images from another registry ... + +- ... Or with a different tag ... + +- ... We could use the following snippet: + +```bash + REGISTRY=dockercoins + TAG=v0.1 + for SERVICE in hasher rng webui worker; do + kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG + done +``` + +--- + +## Is this working? + +- After waiting for the deployment to complete, let's look at the logs! + + (Hint: use `kubectl get deploy -w` to watch deployment events) + +.exercise[ + + + +- Look at some logs: + ```bash + kubectl logs deploy/rng + kubectl logs deploy/worker + ``` + +] + +-- + +πŸ€” `rng` is fine ... But not `worker`. + +-- + +πŸ’‘ Oh right! We forgot to `expose`. + +--- + +## Connecting containers together + +- Three deployments need to be reachable by others: `hasher`, `redis`, `rng` + +- `worker` doesn't need to be exposed + +- `webui` will be dealt with later + +.exercise[ + +- Expose each deployment, specifying the right port: + ```bash + kubectl expose deployment redis --port 6379 + kubectl expose deployment rng --port 80 + kubectl expose deployment hasher --port 80 + ``` + +] + +--- + +## Is this working yet? + +- The `worker` has an infinite loop, that retries 10 seconds after an error + +.exercise[ + +- Stream the worker's logs: + ```bash + kubectl logs deploy/worker --follow + ``` + + (Give it about 10 seconds to recover) + + + +] + +-- + +We should now see the `worker`, well, working happily. 
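+
+To double-check with a number instead of logs, we can read the counter that the worker increments. This is a quick sketch; it assumes the counter is stored in Redis under the key `hashes`, matching how the app was described earlier:
+
+```bash
+POD=$(kubectl get pods -l app=redis -o jsonpath='{.items[0].metadata.name}')
+kubectl exec $POD -- redis-cli get hashes   # should grow between two invocations
+```
+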
+
+---
+
+## Exposing services for external access
+
+- Now we would like to access the Web UI
+
+- We will expose it with a `LoadBalancer`
+
+.exercise[
+
+- Create a `LoadBalancer` service for the Web UI:
+  ```bash
+  kubectl expose deploy/webui --type=LoadBalancer --port=80
+  ```
+
+- Check what was allocated:
+  ```bash
+  kubectl get svc
+  ```
+]
+--
+
+Wait a few moments and rerun `kubectl get svc`; you should see the `EXTERNAL-IP` go from *pending* to an actual IP address.
+
+---
+
+## Accessing the web UI
+
+- We can now connect to the `EXTERNAL-IP` of the allocated load balancer
+
+.exercise[
+
+- Get the `EXTERNAL-IP` of the `webui` service:
+  ```bash
+  LB=$(kubectl get svc webui -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  ```
+
+- Open the web UI in your browser
+  ```bash
+  firefox http://$LB
+  ```
+
+]
diff --git a/slides/namespaced/sampleapp.md b/slides/namespaced/sampleapp.md
new file mode 100644
index 000000000..e903e3e2c
--- /dev/null
+++ b/slides/namespaced/sampleapp.md
@@ -0,0 +1,359 @@
+# Our sample application
+
+- We will clone the GitHub repository into our workspace
+
+> _If you were provided an SSH host, SSH to that first._
+
+- The repository also contains scripts and tools that we will use throughout the workshop
+
+.exercise[
+
+- Clone the repository:
+  ```bash
+  git clone https://@@GITREPO@@
+  ```
+
+]
+
+(You can also fork the repository on GitHub and clone your fork if you prefer that.)
+
+---
+
+## Downloading and running the application
+
+Let's start this before we look around, as downloading will take a little time...
+
+> _Only do this if you were provided an SSH host, or your instructor says there is enough bandwidth._
+
+.exercise[
+
+- Go to the `dockercoins` directory, in the cloned repo:
+  ```bash
+  cd ~/container.training/dockercoins
+  ```
+
+- Use Compose to build and run all containers:
+  ```bash
+  docker-compose up
+  ```
+
+]
+
+Compose tells Docker to build all container images (pulling
+the corresponding base images), then starts all containers,
+and displays aggregated logs.
+
+---
+
+## What's this application?
+
+--
+
+- It is a DockerCoin miner! .emoji[πŸ’°πŸ³πŸ“¦πŸš’]
+
+--
+
+- No, you can't buy coffee with DockerCoins
+
+--
+
+- How DockerCoins works:
+
+  - generate a few random bytes
+
+  - hash these bytes
+
+  - increment a counter (to keep track of speed)
+
+  - repeat forever! (see the sketch below)
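+
+In shell terms, one iteration of that loop looks roughly like this (a sketch only; the real worker is a Python program shown a few slides later, and the `rng`/`hasher`/`redis` names assume you are on the app's network):
+
+```bash
+DATA=$(curl -s http://rng/32)                                  # generate a few random bytes
+HASH=$(curl -s -X POST --data-binary "$DATA" http://hasher/)   # hash these bytes
+redis-cli -h redis incr hashes                                 # increment the counter
+```
+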
+ +-- + +- DockerCoins is *not* a cryptocurrency + + (the only common points are "randomness," "hashing," and "coins" in the name) + +--- + +## DockerCoins in the microservices era + +- DockerCoins is made of 5 services: + + - `rng` = web service generating random bytes + + - `hasher` = web service computing hash of POSTed data + + - `worker` = background process calling `rng` and `hasher` + + - `webui` = web interface to watch progress + + - `redis` = data store (holds a counter updated by `worker`) + +- These 5 services are visible in the application's Compose file, + [docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) + +--- + +## How DockerCoins works + +- `worker` invokes web service `rng` to generate random bytes + +- `worker` invokes web service `hasher` to hash these bytes + +- `worker` does this in an infinite loop + +- every second, `worker` updates `redis` to indicate how many loops were done + +- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser + +*(See diagram on next slide!)* + +--- + +class: pic + +![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) + +--- + +## Service discovery in container-land + +How does each service find out the address of the other ones? + +-- + +- We do not hard-code IP addresses in the code + +- We do not hard-code FQDNs in the code, either + +- We just connect to a service name, and container-magic does the rest + + (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") + +--- + +## Example in `worker/worker.py` + +```python +redis = Redis("`redis`") + + +def get_random_bytes(): + r = requests.get("http://`rng`/32") + return r.content + + +def hash_bytes(data): + r = requests.post("http://`hasher`/", + data=data, + headers={"Content-Type": "application/octet-stream"}) +``` + +(Full source code available [here]( +https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 +)) + +--- + +class: extra-details + +## Links, naming, and service discovery + +- Containers can have network aliases (resolvable through DNS) + +- Compose file version 2+ makes each container reachable through its service name + +- Compose file version 1 required "links" sections to accomplish this + +- Network aliases are automatically namespaced + + - you can have multiple apps declaring and using a service named `database` + + - containers in the blue app will resolve `database` to the IP of the blue database + + - containers in the green app will resolve `database` to the IP of the green database + +--- + +## Show me the code! + +- You can check the GitHub repository with all the materials of this workshop: +
https://@@GITREPO@@ + +- The application is in the [dockercoins]( + https://@@GITREPO@@/tree/master/dockercoins) + subdirectory + +- The Compose file ([docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) + lists all 5 services + +- `redis` is using an official image from the Docker Hub + +- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile + +- Each service's Dockerfile and source code is in its own directory + + (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, + `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) + directory, etc.) + +--- + +class: extra-details + +## Compose file format version + +*This is relevant only if you have used Compose before 2016...* + +- Compose 1.6 introduced support for a new Compose file format (aka "v2") + +- Services are no longer at the top level, but under a `services` section + +- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer) + +- Containers are placed on a dedicated network, making links unnecessary + +- There are other minor differences, but upgrade is easy and straightforward + +--- + +## Our application at work + +- On the left-hand side, the "rainbow strip" shows the container names + +- On the right-hand side, we see the output of our containers + +- We can see the `worker` service making requests to `rng` and `hasher` + +- For `rng` and `hasher`, we see HTTP access logs + +--- + +## Connecting to the web UI + +- "Logs are exciting and fun!" (No-one, ever) + +- The `webui` container exposes a web dashboard; let's view it + +.exercise[ + +- With a web browser, connect to `localhost` on port 8000 + +- Remember: the `nodeX` aliases are valid only on the nodes themselves + +- In your browser, you need to enter the IP address of your node + + + +] + +A drawing area should show up, and after a few seconds, a blue +graph will appear. + +--- + +class: self-paced, extra-details + +## If the graph doesn't load + +If you just see a `Page not found` error, it might be because your +Docker Engine is running on a different machine. This can be the case if: + +- you are using the Docker Toolbox + +- you are using a VM (local or remote) created with Docker Machine + +- you are controlling a remote Docker Engine + +When you run DockerCoins in development mode, the web UI static files +are mapped to the container using a volume. Alas, volumes can only +work on a local environment, or when using Docker Desktop for Mac or Windows. + +How to fix this? + +Stop the app with `^C`, edit `dockercoins.yml`, comment out the `volumes` section, and try again. + +--- + +class: extra-details + +## Why does the speed seem irregular? + +- It *looks like* the speed is approximately 4 hashes/second + +- Or more precisely: 4 hashes/second, with regular dips down to zero + +- Why? + +-- + +class: extra-details + +- The app actually has a constant, steady speed: 3.33 hashes/second +
+ (which corresponds to 1 hash every 0.3 seconds, for *reasons*) + +- Yes, and? + +--- + +class: extra-details + +## The reason why this graph is *not awesome* + +- The worker doesn't update the counter after every loop, but up to once per second + +- The speed is computed by the browser, checking the counter about once per second + +- Between two consecutive updates, the counter will increase either by 4, or by 0 + +- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - 0 etc. + +- What can we conclude from this? + +-- + +class: extra-details + +- "I'm clearly incapable of writing good frontend code!" πŸ˜€ β€” JΓ©rΓ΄me + +--- + +## Stopping the application + +- If we interrupt Compose (with `^C`), it will politely ask the Docker Engine to stop the app + +- The Docker Engine will send a `TERM` signal to the containers + +- If the containers do not exit in a timely manner, the Engine sends a `KILL` signal + +.exercise[ + +- Stop the application by hitting `^C` + + + +] + +-- + +Some containers exit immediately, others take longer. + +The containers that do not handle `SIGTERM` end up being killed after a 10s timeout. If we are very impatient, we can hit `^C` a second time! diff --git a/slides/shared/handson.md b/slides/shared/handson.md new file mode 100644 index 000000000..6fa09ab61 --- /dev/null +++ b/slides/shared/handson.md @@ -0,0 +1,133 @@ +## Hands-on sections + +- The whole workshop is hands-on + +- We are going to build, ship, and run containers! + +- You are invited to reproduce all the demos + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person, pic + +![You get a cluster](images/you-get-a-cluster.jpg) + +--- + +class: in-person + +## You get a cluster of cloud VMs + +- Each person gets a private cluster of cloud VMs (not shared with anybody else) + +- They'll remain up for the duration of the workshop + +- You should have a little card with login+password+IP addresses + +- You can automatically SSH from one VM to another + +- The nodes have aliases: `node1`, `node2`, etc. + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - an SSH client + +--- + +class: in-person + +## SSH clients + +- On Linux, OS X, FreeBSD... you are probably all set + +- On Windows, get one of these: + + - [putty](http://www.putty.org/) + - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) + - [Git BASH](https://git-for-windows.github.io/) + - [MobaXterm](http://mobaxterm.mobatek.net/) + +- On Android, [JuiceSSH](https://juicessh.com/) + ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) + works pretty well + +- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets + +--- + +class: in-person, extra-details + +## What is this Mosh thing? + +*You don't have to use Mosh or even know about it to follow along. +
+We're just telling you about it because some of us think it's cool!* + +- Mosh is "the mobile shell" + +- It is essentially SSH over UDP, with roaming features + +- It retransmits packets quickly, so it works great even on lossy connections + + (Like hotel or conference WiFi) + +- It has intelligent local echo, so it works great even in high-latency connections + + (Like hotel or conference WiFi) + +- It supports transparent roaming when your client IP address changes + + (Like when you hop from hotel to conference WiFi) + +--- + +class: in-person, extra-details + +## Using Mosh + +- To install it: `(apt|yum|brew) install mosh` + +- It has been pre-installed on the VMs that we are using + +- To connect to a remote machine: `mosh user@host` + + (It is going to establish an SSH connection, then hand off to UDP) + +- It requires UDP ports to be open + + (By default, it uses a UDP port between 60000 and 61000) diff --git a/slides/shared/prereqs.md b/slides/shared/prereqs.md index 78c9b0795..3c3824dde 100644 --- a/slides/shared/prereqs.md +++ b/slides/shared/prereqs.md @@ -31,139 +31,3 @@ class: title Misattributed to Benjamin Franklin [(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/) - ---- - -## Hands-on sections - -- The whole workshop is hands-on - -- We are going to build, ship, and run containers! - -- You are invited to reproduce all the demos - -- All hands-on sections are clearly identified, like the gray rectangle below - -.exercise[ - -- This is the stuff you're supposed to do! - -- Go to @@SLIDES@@ to view these slides - - - -] - ---- - -class: in-person - -## Where are we going to run our containers? - ---- - -class: in-person, pic - -![You get a cluster](images/you-get-a-cluster.jpg) - ---- - -class: in-person - -## You get a cluster of cloud VMs - -- Each person gets a private cluster of cloud VMs (not shared with anybody else) - -- They'll remain up for the duration of the workshop - -- You should have a little card with login+password+IP addresses - -- You can automatically SSH from one VM to another - -- The nodes have aliases: `node1`, `node2`, etc. - ---- - -class: in-person - -## Why don't we run containers locally? - -- Installing this stuff can be hard on some machines - - (32 bits CPU or OS... Laptops without administrator access... etc.) - -- *"The whole team downloaded all these container images from the WiFi! -
... and it went great!"* (Literally no-one ever) - -- All you need is a computer (or even a phone or tablet!), with: - - - an internet connection - - - a web browser - - - an SSH client - ---- - -class: in-person - -## SSH clients - -- On Linux, OS X, FreeBSD... you are probably all set - -- On Windows, get one of these: - - - [putty](http://www.putty.org/) - - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) - - [Git BASH](https://git-for-windows.github.io/) - - [MobaXterm](http://mobaxterm.mobatek.net/) - -- On Android, [JuiceSSH](https://juicessh.com/) - ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) - works pretty well - -- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets - ---- - -class: in-person, extra-details - -## What is this Mosh thing? - -*You don't have to use Mosh or even know about it to follow along. -
-We're just telling you about it because some of us think it's cool!* - -- Mosh is "the mobile shell" - -- It is essentially SSH over UDP, with roaming features - -- It retransmits packets quickly, so it works great even on lossy connections - - (Like hotel or conference WiFi) - -- It has intelligent local echo, so it works great even in high-latency connections - - (Like hotel or conference WiFi) - -- It supports transparent roaming when your client IP address changes - - (Like when you hop from hotel to conference WiFi) - ---- - -class: in-person, extra-details - -## Using Mosh - -- To install it: `(apt|yum|brew) install mosh` - -- It has been pre-installed on the VMs that we are using - -- To connect to a remote machine: `mosh user@host` - - (It is going to establish an SSH connection, then hand off to UDP) - -- It requires UDP ports to be open - - (By default, it uses a UDP port between 60000 and 61000)