From c197cc0c5eb060d9afa600a64333bd8623c8dae7 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Wed, 28 Aug 2019 10:23:23 -0500 Subject: [PATCH 01/14] first pass at pks instructions --- slides/k8s/logs-cli.md | 20 +- slides/kube-fullday-namespaced.yml | 91 +++++++ slides/pks/cleanup-dockercoins.md | 12 + slides/pks/concepts-k8s.md | 257 ++++++++++++++++++++ slides/pks/connecting.md | 84 +++++++ slides/pks/dashboard.md | 166 +++++++++++++ slides/pks/helm.md | 216 +++++++++++++++++ slides/pks/ingress.md | 247 +++++++++++++++++++ slides/pks/kubectlexpose.md | 363 ++++++++++++++++++++++++++++ slides/pks/kubectlget.md | 375 +++++++++++++++++++++++++++++ slides/pks/logistics.md | 21 ++ slides/pks/octant.md | 14 ++ slides/pks/ourapponkube.md | 139 +++++++++++ slides/pks/prereqs.md | 115 +++++++++ slides/pks/scalingdockercoins.md | 241 ++++++++++++++++++ slides/pks/setup-k8s.md | 108 +++++++++ 16 files changed, 2466 insertions(+), 3 deletions(-) create mode 100644 slides/kube-fullday-namespaced.yml create mode 100644 slides/pks/cleanup-dockercoins.md create mode 100644 slides/pks/concepts-k8s.md create mode 100644 slides/pks/connecting.md create mode 100644 slides/pks/dashboard.md create mode 100644 slides/pks/helm.md create mode 100644 slides/pks/ingress.md create mode 100644 slides/pks/kubectlexpose.md create mode 100644 slides/pks/kubectlget.md create mode 100644 slides/pks/logistics.md create mode 100644 slides/pks/octant.md create mode 100644 slides/pks/ourapponkube.md create mode 100644 slides/pks/prereqs.md create mode 100644 slides/pks/scalingdockercoins.md create mode 100644 slides/pks/setup-k8s.md diff --git a/slides/k8s/logs-cli.md b/slides/k8s/logs-cli.md index 5a9a6c1ab..71d28d22f 100644 --- a/slides/k8s/logs-cli.md +++ b/slides/k8s/logs-cli.md @@ -84,7 +84,7 @@ Exactly what we need! - View the logs for all the rng containers: ```bash - stern rng + stern ping ``` ] + +--- + +## Cleanup ping pong deployment + +- Time to clean up pingpong and move on + +.exercise[ + + - delete the pingpong deployment + ```bash + kubectl delete deployment pingpong + ``` +] \ No newline at end of file diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml new file mode 100644 index 000000000..f71db8513 --- /dev/null +++ b/slides/kube-fullday-namespaced.yml @@ -0,0 +1,91 @@ +title: | + Deploying and Scaling Microservices + with Kubernetes + +#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" +#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" +chat: "In person!" 
+ +gitrepo: github.com/jpetazzo/container.training + +slides: http://container.training/ + +exclude: +- self-paced + +chapters: +- shared/title.md +- pks/logistics.md +- k8s/intro.md +- shared/about-slides.md +- shared/toc.md +- + - pks/prereqs.md + #- shared/webssh.md + - pks/connecting.md + # - k8s/versions-k8s.md + #- shared/sampleapp.md + #- shared/composescale.md + #- shared/hastyconclusions.md + #- shared/composedown.md + - pks/concepts-k8s.md + - pks/kubectlget.md +- + - k8s/kubectlrun.md + - k8s/logs-cli.md + - shared/declarative.md + - k8s/declarative.md + - k8s/deploymentslideshow.md + - k8s/kubenet.md + - pks/kubectlexpose.md + - k8s/shippingimages.md + #- k8s/buildshiprun-selfhosted.md + - k8s/buildshiprun-dockerhub.md + - pks/ourapponkube.md + #- k8s/kubectlproxy.md + #- k8s/localkubeconfig.md + #- k8s/accessinternal.md +- + - pks/setup-k8s.md + - pks/dashboard.md + - pks/octant.md + #- k8s/kubectlscale.md + - pks/scalingdockercoins.md + - shared/hastyconclusions.md + - k8s/daemonset.md + - k8s/rollout.md + #- k8s/healthchecks.md + #- k8s/healthchecks-more.md + #- k8s/record.md +- + #- k8s/namespaces.md + - pks/ingress.md + - pks/cleanup-dockercoins.md + #- k8s/kustomize.md + #- k8s/helm.md + #- k8s/create-chart.md + #- k8s/netpol.md + #- k8s/authn-authz.md + #- k8s/csr-api.md + #- k8s/openid-connect.md + #- k8s/podsecuritypolicy.md + - k8s/volumes.md + #- k8s/build-with-docker.md + #- k8s/build-with-kaniko.md + - k8s/configuration.md + #- k8s/logs-centralized.md + #- k8s/prometheus.md + #- k8s/statefulsets.md + #- k8s/local-persistent-volumes.md + #- k8s/portworx.md + #- k8s/extending-api.md + #- k8s/operators.md + #- k8s/operators-design.md + #- k8s/staticpods.md + #- k8s/owners-and-dependents.md + #- k8s/gitworkflows.md + - pks/helm.md +- + - k8s/whatsnext.md + - k8s/links.md + - shared/thankyou.md diff --git a/slides/pks/cleanup-dockercoins.md b/slides/pks/cleanup-dockercoins.md new file mode 100644 index 000000000..8fa5bce1a --- /dev/null +++ b/slides/pks/cleanup-dockercoins.md @@ -0,0 +1,12 @@ +# Let's do some housekeeping + +- We've created a lot of resources, let's clean them up. + +.exercise[ + - Delete resources: + ```bash + kubectl delete deployment,svc hasher redis rng webui + kubectl delete deployment worker + kubectl delete ingress webui + kubectl delete daemonset rng +] diff --git a/slides/pks/concepts-k8s.md b/slides/pks/concepts-k8s.md new file mode 100644 index 000000000..edb6f1380 --- /dev/null +++ b/slides/pks/concepts-k8s.md @@ -0,0 +1,257 @@ +# Kubernetes concepts + +- Kubernetes is a container management system + +- It runs and manages containerized applications on a cluster + +-- + +- What does that really mean? + +--- + +## Basic things we can ask Kubernetes to do + +-- + +- Start 5 containers using image `atseashop/api:v1.3` + +-- + +- Place an internal load balancer in front of these containers + +-- + +- Start 10 containers using image `atseashop/webfront:v1.3` + +-- + +- Place a public load balancer in front of these containers + +-- + +- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers + +-- + +- New release! 
Replace my containers with the new image `atseashop/webfront:v1.4` + +-- + +- Keep processing requests during the upgrade; update my containers one at a time + +--- + +## Other things that Kubernetes can do for us + +- Basic autoscaling + +- Blue/green deployment, canary deployment + +- Long running services, but also batch (one-off) jobs + +- Overcommit our cluster and *evict* low-priority jobs + +- Run services with *stateful* data (databases etc.) + +- Fine-grained access control defining *what* can be done by *whom* on *which* resources + +- Integrating third party services (*service catalog*) + +- Automating complex tasks (*operators*) + +--- + +## Kubernetes architecture + +--- + +class: pic + +![haha only kidding](images/k8s-arch1.png) + +--- + +## Kubernetes architecture + +- Ha ha ha ha + +- OK, I was trying to scare you, it's much simpler than that ❤️ + +--- + +class: pic + +![that one is more like the real thing](images/k8s-arch2.png) + +--- + +## Credits + +- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI + + (Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/)) + +- The second one is a simplified representation of a Kubernetes cluster + + (Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e)) + +--- + +## Kubernetes architecture: the data plane + +- The data plane is a collection of nodes that execute our containers + +- These nodes run a collection of services: + + - a container Engine (typically Docker) + + - kubelet (the "node agent") + + - kube-proxy (a necessary but not sufficient network component) + +- Nodes were formerly called "minions" + + (You might see that word in older articles or documentation) + +--- + +## Kubernetes architecture: the control plane + +- The Kubernetes logic (its "brains") is a collection of services: + + - the API server (our point of entry to everything!) + + - core services like the scheduler and controller manager + + - `etcd` (a highly available key/value store; the "database" of Kubernetes) + +- Together, these services form the control plane of our cluster + +- The control plane is also called the "master" + +--- + +class: pic + +![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png) + +--- + +class: extra-details + +## Running the control plane on special nodes + +- PKS reserves dedicated node[s] for the control plane + +- This node is then called a "master" + + (Yes, this is ambiguous: is the "master" a node, or the whole control plane?) + +- Normal applications are restricted from running on this node + +- When high availability is required, each service of the control plane must be resilient + +- The control plane is then replicated on multiple nodes + + (This is sometimes called a "multi-master" setup) + +--- + +class: extra-details + +## Do we need to run Docker at all? + +No! + +-- + +- By default, Kubernetes uses the Docker Engine to run containers + +- We could also use `rkt` ("Rocket") from CoreOS + +- Or leverage other pluggable runtimes through the *Container Runtime Interface* + + (like CRI-O, or containerd) + +--- + +class: extra-details + +## Do we need to run Docker at all? + +Yes! + +-- + +- Our Kubernetes cluster is using Docker as the container engine + +- We still use it to build images and ship them around + +- We can do these things without Docker +
+ (and get diagnosed with NIH¹ syndrome) + +- Docker is still the most stable container engine today +
+ (but other options are maturing very quickly) + +.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)] + +--- + +class: extra-details + +## Do we need to run Docker at all? + +- On our development environments, CI pipelines ... : + + *Yes, almost certainly* + +- On our production servers: + + *Yes (today)* + + *Probably not (in the future)* + +.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)] + +--- + +## Interacting with Kubernetes + +- We will interact with our Kubernetes cluster through the Kubernetes API + +- The Kubernetes API is (mostly) RESTful + +- It allows us to create, read, update, delete *resources* + +- A few common resource types are: + + - node (a machine — physical or virtual — in our cluster) + + - pod (group of containers running together on a node) + + - service (stable network endpoint to connect to one or multiple containers) + +--- + +class: pic + +![Node, pod, container](images/k8s-arch3-thanks-weave.png) + +--- + +## Credits + +- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha) + + - it's one of the best Kubernetes architecture diagrams available! + +- The second diagram is courtesy of Weave Works + + - a *pod* can have multiple containers working together + + - IP addresses are associated with *pods*, not with individual containers + +Both diagrams used with permission. diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md new file mode 100644 index 000000000..89d32f799 --- /dev/null +++ b/slides/pks/connecting.md @@ -0,0 +1,84 @@ +class: in-person + +## Connecting to our lab environment + +.exercise[ + +- Log into https://workshop.paulczar.wtf with your provided credentials + +- Follow the instructions on the auth portal to set up a `kubeconfig` file. + +- Check that you can connect to the cluster with `kubectl get nodes`: + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +vm-0f2b473c-5ae6-4af3-4e80-f0a068b03abe Ready 23h v1.14.5 +vm-25cfc8d6-88c0-45f6-4305-05e859af7f2c Ready 23h v1.14.5 +... +... +``` +] + +If anything goes wrong — ask for help! + +--- + +## Doing or re-doing the workshop on your own? + +- Use something like + [Play-With-Docker](http://play-with-docker.com/) or + [Play-With-Kubernetes](https://training.play-with-kubernetes.com/) + + Zero setup effort; but environment are short-lived and + might have limited resources + +- Create your own cluster (local or cloud VMs) + + Small setup effort; small cost; flexible environments + +- Create a bunch of clusters for you and your friends + ([instructions](https://@@GITREPO@@/tree/master/prepare-vms)) + + Bigger setup effort; ideal for group training + +--- + +class: self-paced + +## Get your own Docker nodes + +- If you already have some Docker nodes: great! + +- If not: let's get some thanks to Play-With-Docker + +.exercise[ + +- Go to http://www.play-with-docker.com/ + +- Log in + +- Create your first node + + + +] + +You will need a Docker ID to use Play-With-Docker. + +(Creating a Docker ID is free.) + +--- + +## Terminals + +Once in a while, the instructions will say: +
"Open a new terminal." + +There are multiple ways to do this: + +- create a new window or tab on your machine, and SSH into the VM; + +- use screen or tmux on the VM and open a new window from there. + +You are welcome to use the method that you feel the most comfortable with. diff --git a/slides/pks/dashboard.md b/slides/pks/dashboard.md new file mode 100644 index 000000000..18cca1d72 --- /dev/null +++ b/slides/pks/dashboard.md @@ -0,0 +1,166 @@ +# The Kubernetes dashboard + +- Kubernetes resources can also be viewed with a web dashboard + +- That dashboard is usually exposed over HTTPS + + (this requires obtaining a proper TLS certificate) + +- Dashboard users need to authenticate + +- Most people just YOLO it into their cluster and then get hacked + +--- + +## Stop the madness + +You know what, this is all a very bad idea. Let's not run the Kubernetes dashboard at all ... ever. + +The following slides are informational. Do not run them. + +--- + +## The insecure method + +- We could (and should) use [Let's Encrypt](https://letsencrypt.org/) ... + +- ... but we don't want to deal with TLS certificates + +- We could (and should) learn how authentication and authorization work ... + +- ... but we will use a guest account with admin access instead + +.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]] + +--- + +## Running a very insecure dashboard + +- We are going to deploy that dashboard with *one single command* + +- This command will create all the necessary resources + + (the dashboard itself, the HTTP wrapper, the admin/guest account) + +- All these resources are defined in a YAML file + +- All we have to do is load that YAML file with with `kubectl apply -f` + +.exercise[ + +- Create all the dashboard resources, with the following command: + ```bash + kubectl apply -f ~/container.training/k8s/insecure-dashboard.yaml + ``` + +] + +--- + +## Connecting to the dashboard + +.exercise[ + +- Check which port the dashboard is on: + ```bash + kubectl get svc dashboard + ``` + +] + +You'll want the `3xxxx` port. + + +.exercise[ + +- Connect to http://oneofournodes:3xxxx/ + + + +] + +The dashboard will then ask you which authentication you want to use. + +--- + +## Dashboard authentication + +- We have three authentication options at this point: + + - token (associated with a role that has appropriate permissions) + + - kubeconfig (e.g. using the `~/.kube/config` file from `node1`) + + - "skip" (use the dashboard "service account") + +- Let's use "skip": we're logged in! + +-- + +.warning[By the way, we just added a backdoor to our Kubernetes cluster!] + +--- + +## Running the Kubernetes dashboard securely + +- The steps that we just showed you are *for educational purposes only!* + +- If you do that on your production cluster, people [can and will abuse it](https://redlock.io/blog/cryptojacking-tesla) + +- For an in-depth discussion about securing the dashboard, +
+ check [this excellent post on Heptio's blog](https://blog.heptio.com/on-securing-the-kubernetes-dashboard-16b09b1b7aca) + +--- + +# Security implications of `kubectl apply` + +- When we do `kubectl apply -f `, we create arbitrary resources + +- Resources can be evil; imagine a `deployment` that ... + +-- + + - starts bitcoin miners on the whole cluster + +-- + + - hides in a non-default namespace + +-- + + - bind-mounts our nodes' filesystem + +-- + + - inserts SSH keys in the root account (on the node) + +-- + + - encrypts our data and ransoms it + +-- + + - ☠️☠️☠️ + +--- + +## `kubectl apply` is the new `curl | sh` + +- `curl | sh` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +-- + +- `kubectl apply -f` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +- Example: the official setup instructions for most pod networks + +-- + +- It introduces new failure modes + + (for instance, if you try to apply YAML from a link that's no longer valid) diff --git a/slides/pks/helm.md b/slides/pks/helm.md new file mode 100644 index 000000000..6cab8141e --- /dev/null +++ b/slides/pks/helm.md @@ -0,0 +1,216 @@ +# Managing stacks with Helm + +- We created our first resources with `kubectl run`, `kubectl expose` ... + +- We have also created resources by loading YAML files with `kubectl apply -f` + +- For larger stacks, managing thousands of lines of YAML is unreasonable + +- These YAML bundles need to be customized with variable parameters + + (E.g.: number of replicas, image version to use ...) + +- It would be nice to have an organized, versioned collection of bundles + +- It would be nice to be able to upgrade/rollback these bundles carefully + +- [Helm](https://helm.sh/) is an open source project offering all these things! + +--- + +## Helm concepts + +- `helm` is a CLI tool + +- `tiller` is its companion server-side component + +- A "chart" is an archive containing templatized YAML bundles + +- Charts are versioned + +- Charts can be stored on private or public repositories + +-- + +*We're going to use the beta of Helm 3 as it does not require `tiller` making things simpler and more secure for us.* +--- + +## Installing Helm + +- If the `helm` 3 CLI is not installed in your environment, [install it](https://github.com/helm/helm/releases/tag/v3.0.0-beta.1) + +.exercise[ + +- Check if `helm` is installed: + ```bash + helm version + ``` +] + +-- + +```bash +version.BuildInfo{Version:"v3.0.0-beta.1", GitCommit:"f76b5f21adb53a85de8925f4a9d4f9bd99f185b5", GitTreeState:"clean", GoVersion:"go1.12.9"}` +``` + +--- + +## Oops you accidently a Helm 2 + +If `helm version` gives you a result like below it means you have helm 2 which requires the `tiller` server side component. + +``` +Client: &version.Version{SemVer:"v2.14.0", GitCommit:"05811b84a3f93603dd6c2fcfe57944dfa7ab7fd0", GitTreeState:"clean"} +Error: forwarding ports: error upgrading connection: pods "tiller-deploy-6fd87785-x8sxk" is forbidden: User "user1" cannot create resource "pods/portforward" in API group "" in the namespace "kube-system" +``` + +Run `EXPORT TILLER_NAMESPACE=` and try again. We've pre-installed `tiller` for you in your namespace just in case. + +-- + +Some of the commands in the following may not work in helm 2. Good luck! + +--- + +## Installing Tiller + +*If you were running Helm 2 you would need to install Tiller. We can skip this.* + +- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace + +- They can be managed (installed, upgraded...) 
with the `helm` CLI + +.exercise[ + +- Deploy Tiller: + ```bash + helm init + ``` + +] + +If Tiller was already installed, don't worry: this won't break it. + +At the end of the install process, you will see: + +``` +Happy Helming! +``` + +--- + +## Fix account permissions + +*If you were running Helm 2 you would need to install Tiller. We can skip this.* + +- Helm permission model requires us to tweak permissions + +- In a more realistic deployment, you might create per-user or per-team + service accounts, roles, and role bindings + +.exercise[ + +- Grant `cluster-admin` role to `kube-system:default` service account: + ```bash + kubectl create clusterrolebinding add-on-cluster-admin \ + --clusterrole=cluster-admin --serviceaccount=kube-system:default + ``` + +] + +(Defining the exact roles and permissions on your cluster requires +a deeper knowledge of Kubernetes' RBAC model. The command above is +fine for personal and development clusters.) + +--- + +## View available charts + +- A public repo is pre-configured when installing Helm + +- We can view available charts with `helm search` (and an optional keyword) + +.exercise[ + +- View all available charts: + ```bash + helm search hub + ``` + +- View charts related to `prometheus`: + ```bash + helm search hub prometheus + ``` + +] + +--- + +## Add the stable chart repository + +- Helm 3 does not come configured with any repositories, so we need to start by adding the stable repo. + +.exercise[ + - Add the stable repo + ```bash + helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + ``` +] + +--- + +## Install a chart + +- Most charts use `LoadBalancer` service types by default + +- Most charts require persistent volumes to store data + +- We need to relax these requirements a bit + +.exercise[ + +- Install the Prometheus metrics collector on our cluster: + ```bash + helm install stable/prometheus \ + prometheus \ + --set server.service.type=ClusterIP \ + --set server.persistentVolume.enabled=false + ``` + +] + +Where do these `--set` options come from? + +--- + +## Inspecting a chart + +- `helm inspect` shows details about a chart (including available options) + +.exercise[ + +- See the metadata and all available options for `stable/prometheus`: + ```bash + helm inspect stable/prometheus + ``` + +] + +The chart's metadata includes a URL to the project's home page. + +(Sometimes it conveniently points to the documentation for the chart.) + +--- + +## Viewing installed charts + +- Helm keeps track of what we've installed + +.exercise[ + +- List installed Helm charts: + ```bash + helm list + ``` + +] diff --git a/slides/pks/ingress.md b/slides/pks/ingress.md new file mode 100644 index 000000000..b296ccfd7 --- /dev/null +++ b/slides/pks/ingress.md @@ -0,0 +1,247 @@ +# Exposing HTTP services with Ingress resources + +- *Services* give us a way to access a pod or a set of pods + +- Services can be exposed to the outside world: + + - with type `NodePort` (on a port >30000) + + - with type `LoadBalancer` (allocating an external load balancer) + +- What about HTTP services? + + - how can we expose `webui`, `rng`, `hasher`? + + - the Kubernetes dashboard? + + - a new version of `webui`? + +--- + +## Exposing HTTP services + +- If we use `NodePort` services, clients have to specify port numbers + + (i.e. http://xxxxx:31234 instead of just http://xxxxx) + +- `LoadBalancer` services are nice, but: + + - they are not available in all environments + + - they often carry an additional cost (e.g. 
they provision an ELB) + + - they require one extra step for DNS integration +
+ (waiting for the `LoadBalancer` to be provisioned; then adding it to DNS) + +- We could build our own reverse proxy + +--- + +## Building a custom reverse proxy + +- There are many options available: + + Apache, HAProxy, Hipache, NGINX, Traefik, ... + + (look at [jpetazzo/aiguillage](https://github.com/jpetazzo/aiguillage) for a minimal reverse proxy configuration using NGINX) + +- Most of these options require us to update/edit configuration files after each change + +- Some of them can pick up virtual hosts and backends from a configuration store + +- Wouldn't it be nice if this configuration could be managed with the Kubernetes API? + +-- + +- Enter.red[¹] *Ingress* resources! + +.footnote[.red[¹] Pun maybe intended.] + +--- + +## Ingress resources + +- Kubernetes API resource (`kubectl get ingress`/`ingresses`/`ing`) + +- Designed to expose HTTP services + +- Basic features: + + - load balancing + - SSL termination + - name-based virtual hosting + +- Can also route to different services depending on: + + - URI path (e.g. `/api`→`api-service`, `/static`→`assets-service`) + - Client headers, including cookies (for A/B testing, canary deployment...) + - and more! + +--- + +## Principle of operation + +- Step 1: deploy an *ingress controller* + + - ingress controller = load balancer + control loop + + - the control loop watches over ingress resources, and configures the LB accordingly + +- Step 2: set up DNS + + - associate DNS entries with the load balancer address + +- Step 3: create *ingress resources* + + - the ingress controller picks up these resources and configures the LB + +- Step 4: profit! + +--- + +## Ingress in action + +- We already have an nginx-ingress controller deployed + +- For DNS, we have a wildcard set up pointing at our ingress LB + + - `*.ingress.workshop.paulczar.wtf` + +- We will create ingress resources for various HTTP services + +--- + +## Checking that nginx-ingress runs correctly + +- If Traefik started correctly, we now have a web server listening on each node + +.exercise[ + +- Check that nginx is serving 80/tcp: + ```bash + curl test.ingress.workshop.paulczar.wtf + ``` + +] + +We should get a `404 page not found` error. + +This is normal: we haven't provided any ingress rule yet. + +--- + +## Expose that webui + +- Before we can enable the ingress, we need to create a service for the webui + +.exercise[ + + - create a service for the webui deployment + ```bash + kubectl expose deployment webui --port 80 + ``` + +] + +--- + + +## Setting up host-based routing ingress rules + +- We are going to create an ingress rule for our webui + +.exercise[ + - Write this to `~/workshop/ingress.yaml` and change the host prefix +] + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: webui +spec: + rules: + - host: user1.ingress.workshop.paulczar.wtf + http: + paths: + - path: / + backend: + serviceName: webui + servicePort: 80 +``` + +--- + +## Creating our ingress resources + +.exercise[ + - Apply the ingress manifest + ```bash + kubectl apply -f ~/workshop/ingress.yaml + ``` +] + +-- + +```bash +$ curl user1.ingress.workshop.paulczar.wtf +Found. Redirecting to /index.html +``` + + +--- + +## Using multiple ingress controllers + +- You can have multiple ingress controllers active simultaneously + + (e.g. Traefik and NGINX) + +- You can even have multiple instances of the same controller + + (e.g. 
one for internal, another for external traffic) + +- The `kubernetes.io/ingress.class` annotation can be used to tell which one to use + +- It's OK if multiple ingress controllers configure the same resource + + (it just means that the service will be accessible through multiple paths) + +--- + +## Ingress: the good + +- The traffic flows directly from the ingress load balancer to the backends + + - it doesn't need to go through the `ClusterIP` + + - in fact, we don't even need a `ClusterIP` (we can use a headless service) + +- The load balancer can be outside of Kubernetes + + (as long as it has access to the cluster subnet) + +- This allows the use of external (hardware, physical machines...) load balancers + +- Annotations can encode special features + + (rate-limiting, A/B testing, session stickiness, etc.) + +--- + +## Ingress: the bad + +- Aforementioned "special features" are not standardized yet + +- Some controllers will support them; some won't + +- Even relatively common features (stripping a path prefix) can differ: + + - [traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip](https://docs.traefik.io/user-guide/kubernetes/#path-based-routing) + + - [ingress.kubernetes.io/rewrite-target: /](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/rewrite) + +- This should eventually stabilize + + (remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`) diff --git a/slides/pks/kubectlexpose.md b/slides/pks/kubectlexpose.md new file mode 100644 index 000000000..38b762e3f --- /dev/null +++ b/slides/pks/kubectlexpose.md @@ -0,0 +1,363 @@ +# Exposing containers + +- `kubectl expose` creates a *service* for existing pods + +- A *service* is a stable address for a pod (or a bunch of pods) + +- If we want to connect to our pod(s), we need to create a *service* + +- Once a service is created, CoreDNS will allow us to resolve it by name + + (i.e. after creating service `hello`, the name `hello` will resolve to something) + +- There are different types of services, detailed on the following slides: + + `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName` + +--- + +## Basic service types + +- `ClusterIP` (default type) + + - a virtual IP address is allocated for the service (in an internal, private range) + - this IP address is reachable only from within the cluster (nodes and pods) + - our code can connect to the service using the original port number + +- `NodePort` + + - a port is allocated for the service (by default, in the 30000-32768 range) + - that port is made available *on all our nodes* and anybody can connect to it + - our code must be changed to connect to that new port number + +These service types are always available. + +Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules. + +--- + +## More service types + +- `LoadBalancer` + + - an external load balancer is allocated for the service + - the load balancer is configured accordingly +
(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port) + - available only when the underlying infrastructure provides some "load balancer as a service" +
(e.g. AWS, Azure, GCE, OpenStack...) + +- `ExternalName` + + - the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record + - no port, no IP address, no nothing else is allocated + +--- + +## Running containers with open ports + +- Since `ping` doesn't have anything to connect to, we'll have to run something else + +- We could use the `nginx` official image, but ... + + ... we wouldn't be able to tell the backends from each other! + +- We are going to use `jpetazzo/httpenv`, a tiny HTTP server written in Go + +- `jpetazzo/httpenv` listens on port 8888 + +- It serves its environment variables in JSON format + +- The environment variables will include `HOSTNAME`, which will be the pod name + + (and therefore, will be different on each backend) + +--- + +## Creating a deployment for our HTTP server + +- We *could* do `kubectl run httpenv --image=jpetazzo/httpenv` ... + +- But since `kubectl run` is being deprecated, let's see how to use `kubectl create` instead + +.exercise[ + +- In another window, watch the pods (to see when they are created): + ```bash + kubectl get pods -w + ``` + + + +- Create a deployment for this very lightweight HTTP server: + ```bash + kubectl create deployment httpenv --image=jpetazzo/httpenv + ``` + +- Scale it to 3 replicas: + ```bash + kubectl scale deployment httpenv --replicas=3 + ``` + +] + +--- + +## Exposing our deployment + +- We'll create a default `ClusterIP` service + +.exercise[ + +- Expose the HTTP port of our server: + ```bash + kubectl expose deployment httpenv --port 8888 + ``` + +- Look up which IP address was allocated: + ```bash + kubectl get service + ``` + +] + +--- + +## Services are layer 4 constructs + +- You can assign IP addresses to services, but they are still *layer 4* + + (i.e. a service is not an IP address; it's an IP address + protocol + port) + +- This is caused by the current implementation of `kube-proxy` + + (it relies on mechanisms that don't support layer 3) + +- As a result: you *have to* indicate the port number for your service + +- Running services with arbitrary port (or port ranges) requires hacks + + (e.g. host networking mode) + +--- + +## Testing our service + +- We will now send a few HTTP requests to our pods + +.exercise[ + +- Let's obtain the IP address that was allocated for our service, *programmatically:* + ```bash + IP=$(kubectl get svc httpenv -o go-template --template '{{ .spec.clusterIP }}') + ``` + +- Send a few requests: + ```bash + curl http://$IP:8888/ + ``` + +- Too much output? Filter it with `jq`: + ```bash + curl -s http://$IP:8888/ | jq .HOSTNAME + ``` + +] + +-- + +Oh right, that doesn't work, its a `cluster-ip`. We need another way to access it. + +--- + +## port forwarding + +- You can forward a local port from your machine into a pod + +.exercise[ + +- Forward a port into your deployment: + ```bash + kubectl port-forward service/httpenv 8888:8888 + ``` + +- In a new window run curl a few times: + ```bash + curl localhost:8888 + curl localhost:8888 + curl localhost:8888 + ``` + +- Hit `ctrl-c` in the original window to terminate the port-forward +] + +-- + +The response was the same from each request. This is because `kubectl port-forward` forwards to a specific pod, not to the cluster-ip. 
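+
+You can make this explicit by forwarding to one specific pod by name (an optional quick check; the pod name below is a placeholder, substitute one from your own listing):
+
+```bash
+# List the pods backing the service (names will differ in your namespace)
+kubectl get pods -l app=httpenv
+
+# Forward to one specific pod (replace with a real pod name from the list above)
+kubectl port-forward pod/httpenv-xxxxxxxxxx-xxxxx 8888:8888
+```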
+ +--- + +class: extra-details + +## If we don't need a clusterIP load balancer + +- Sometimes, we want to access our scaled services directly: + + - if we want to save a tiny little bit of latency (typically less than 1ms) + + - if we need to connect over arbitrary ports (instead of a few fixed ones) + + - if we need to communicate over another protocol than UDP or TCP + + - if we want to decide how to balance the requests client-side + + - ... + +- In that case, we can use a "headless service" + +--- + +class: extra-details + +## Headless services + +- A headless service is obtained by setting the `clusterIP` field to `None` + + (Either with `--cluster-ip=None`, or by providing a custom YAML) + +- As a result, the service doesn't have a virtual IP address + +- Since there is no virtual IP address, there is no load balancer either + +- CoreDNS will return the pods' IP addresses as multiple `A` records + +- This gives us an easy way to discover all the replicas for a deployment + +--- + +class: extra-details + +## Services and endpoints + +- A service has a number of "endpoints" + +- Each endpoint is a host + port where the service is available + +- The endpoints are maintained and updated automatically by Kubernetes + +.exercise[ + +- Check the endpoints that Kubernetes has associated with our `httpenv` service: + ```bash + kubectl describe service httpenv + ``` + +] + +In the output, there will be a line starting with `Endpoints:`. + +That line will list a bunch of addresses in `host:port` format. + +--- + +class: extra-details + +## Viewing endpoint details + +- When we have many endpoints, our display commands truncate the list + ```bash + kubectl get endpoints + ``` + +- If we want to see the full list, we can use one of the following commands: + ```bash + kubectl describe endpoints httpenv + kubectl get endpoints httpenv -o yaml + ``` + +- These commands will show us a list of IP addresses + +- These IP addresses should match the addresses of the corresponding pods: + ```bash + kubectl get pods -l app=httpenv -o wide + ``` + +--- + +class: extra-details + +## `endpoints` not `endpoint` + +- `endpoints` is the only resource that cannot be singular + +```bash +$ kubectl get endpoint +error: the server doesn't have a resource type "endpoint" +``` + +- This is because the type itself is plural (unlike every other resource) + +- There is no `endpoint` object: `type Endpoints struct` + +- The type doesn't represent a single endpoint, but a list of endpoints + +--- + +## Exposing services to the outside world + +- The default type (ClusterIP) only works for internal traffic + +- If we want to accept external traffic, we can use one of these: + + - NodePort (expose a service on a TCP port between 30000-32768) + + - LoadBalancer (provision a cloud load balancer for our service) + + - ExternalIP (use one node's external IP address) + + - Ingress (a special mechanism for HTTP services) + +*We'll see NodePorts and Ingresses more in detail later.* + +--- + +## Exposing services to the outside world + +.exercise[ + +- Set the service to be of type `Loadbalancer`: + ```bash + kubectl patch svc httpenv -p '{"spec": {"type": "LoadBalancer"}}' + ``` + +- Check for the IP of the loadbalancer: + ```bash + kubectl get svc httpenv + ``` + +- Test access via the loadbalancer: + ```bash + curl :8888 + ``` +] + +-- + +The `kubectl patch` command lets you patch a kubernetes resource to make minor changes like the above modification of the service type. 
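+
+If you want to undo the change (or simply prefer YAML), the same mechanism works in the other direction; this is just a sketch, since we delete the service in the next step anyway:
+
+```bash
+# Optional revert: switch the service back to ClusterIP
+kubectl patch svc httpenv -p '{"spec": {"type": "ClusterIP"}}'
+
+# The patch can also be written as YAML instead of JSON:
+kubectl patch svc httpenv -p '
+spec:
+  type: ClusterIP
+'
+```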
+ +--- + +## Cleanup + +.exercise[ + +- Delete the service + ```bash + kubectl delete svc httpenv + ``` + +- Delete the deployment + ```bash + kubectl delete deployment httpenv + ``` + +] diff --git a/slides/pks/kubectlget.md b/slides/pks/kubectlget.md new file mode 100644 index 000000000..65e8c4eca --- /dev/null +++ b/slides/pks/kubectlget.md @@ -0,0 +1,375 @@ +# First contact with `kubectl` + +- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes + +- It is a rich CLI tool around the Kubernetes API + + (Everything you can do with `kubectl`, you can do directly with the API) + +- On our machines, there is a `~/.kube/config` file with: + + - the Kubernetes API address + + - the path to our TLS certificates used to authenticate + +- You can also use the `--kubeconfig` flag to pass a config file + +- Or directly `--server`, `--user`, etc. + +- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"... + +--- + +## `kubectl get` + +- Let's look at our `Node` resources with `kubectl get`! + +.exercise[ + +- Look at the composition of our cluster: + ```bash + kubectl get node + ``` + +- These commands are equivalent: + ```bash + kubectl get no + kubectl get node + kubectl get nodes + ``` + +] + +--- + +## Obtaining machine-readable output + +- `kubectl get` can output JSON, YAML, or be directly formatted + +.exercise[ + +- Give us more info about the nodes: + ```bash + kubectl get nodes -o wide + ``` + +- Let's have some YAML: + ```bash + kubectl get no -o yaml + ``` + See that `kind: List` at the end? It's the type of our result! + +] + +--- + +## (Ab)using `kubectl` and `jq` + +- It's super easy to build custom reports + +.exercise[ + +- Show the capacity of all our nodes as a stream of JSON objects: + ```bash + kubectl get nodes -o json | + jq ".items[] | {name:.metadata.name} + .status.capacity" + ``` + +] + +--- + +class: extra-details + +## Exploring types and definitions + +- We can list all available resource types by running `kubectl api-resources` +
+ (In Kubernetes 1.10 and prior, this command used to be `kubectl get`) + +- We can view the definition for a resource type with: + ```bash + kubectl explain type + ``` + +- We can view the definition of a field in a resource, for instance: + ```bash + kubectl explain node.spec + ``` + +- Or get the full definition of all fields and sub-fields: + ```bash + kubectl explain node --recursive + ``` + +--- + +class: extra-details + +## Introspection vs. documentation + +- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference) + +- The API documentation is usually easier to read, but: + + - it won't show custom types (like Custom Resource Definitions) + + - we need to make sure that we look at the correct version + +- `kubectl api-resources` and `kubectl explain` perform *introspection* + + (they communicate with the API server and obtain the exact type definitions) + +--- + +## Type names + +- The most common resource names have three forms: + + - singular (e.g. `node`, `service`, `deployment`) + + - plural (e.g. `nodes`, `services`, `deployments`) + + - short (e.g. `no`, `svc`, `deploy`) + +- Some resources do not have a short name + +- `Endpoints` only have a plural form + + (because even a single `Endpoints` resource is actually a list of endpoints) + +--- + +## Viewing details + +- We can use `kubectl get -o yaml` to see all available details + +- However, YAML output is often simultaneously too much and not enough + +- For instance, `kubectl get node node1 -o yaml` is: + + - too much information (e.g.: list of images available on this node) + + - not enough information (e.g.: doesn't show pods running on this node) + + - difficult to read for a human operator + +- For a comprehensive overview, we can use `kubectl describe` instead + +--- + +## `kubectl describe` + +- `kubectl describe` needs a resource type and (optionally) a resource name + +- It is possible to provide a resource name *prefix* + + (all matching objects will be displayed) + +- `kubectl describe` will retrieve some extra information about the resource + +.exercise[ + +- Look at the information available for `node1` with one of the following commands: + ```bash + kubectl describe node/node1 + kubectl describe node node1 + ``` + +] + +(We should notice a bunch of control plane pods.) + +--- + +## Services + +- A *service* is a stable endpoint to connect to "something" + + (In the initial proposal, they were called "portals") + +.exercise[ + +- List the services on our cluster with one of these commands: + ```bash + kubectl get services + kubectl get svc + ``` + +] + +-- + +There should be no services. This is because you're not running anything yet. But there are some services running in other namespaces. + +--- + +## Services + +- A *service* is a stable endpoint to connect to "something" + + (In the initial proposal, they were called "portals") + +.exercise[ + +- List the services on our cluster with one of these commands: + ```bash + kubectl get services --all-namespaces + kubectl get svc --all-namespaces + ``` + +] + +-- + +There's a bunch of services already running that are used in the operations of the Kubernetes cluster. 
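+
+One of them matters for the next slide: the `kubernetes` service in the `default` namespace, which fronts the API server. Depending on how RBAC is set up, you should be able to list it even with restricted credentials:
+
+```bash
+# The CLUSTER-IP shown here is the address used on the next slide
+kubectl get svc -n default
+```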
+ +--- + +## ClusterIP services + +- A `ClusterIP` service is internal, available from the cluster only + +- This is useful for introspection from within containers + +.exercise[ + +- Try to connect to the API: + ```bash + curl -k https://`10.100.200.1` + ``` + + - `-k` is used to skip certificate verification + + - Make sure to replace 10.100.200.1 with the CLUSTER-IP for the `kubernetes` service shown by `kubectl get svc` + +] + +-- + +The Cluster IP is only accessible from inside the cluster. We'll explore other ways to expose a service later. + +--- + +## Listing running containers + +- Containers are manipulated through *pods* + +- A pod is a group of containers: + + - running together (on the same node) + + - sharing resources (RAM, CPU; but also network, volumes) + +.exercise[ + +- List pods on our cluster: + ```bash + kubectl get pods + ``` + +] + +-- + +*Where are the pods that we saw just a moment earlier?!?* + +--- + +## Namespaces + +- Namespaces allow us to segregate resources + +.exercise[ + +- List the namespaces on our cluster with one of these commands: + ```bash + kubectl get namespaces + kubectl get namespace + kubectl get ns + ``` + +] + +-- + +*You know what ... This `kube-system` thing looks suspicious.* + +*In fact, I'm pretty sure it showed up earlier, when we did:* + +`kubectl describe node node1` + +--- + +## Accessing namespaces + +- By default, `kubectl` uses the `default` namespace + +- We can see resources in all namespaces with `--all-namespaces` + +.exercise[ + +- List the pods in all namespaces: + ```bash + kubectl get pods --all-namespaces + ``` + +- Since Kubernetes 1.14, we can also use `-A` as a shorter version: + ```bash + kubectl get pods -A + ``` + +] + +*Here are our system pods!* + +--- + +## What are all these control plane pods? + +- `kube-apiserver` is the API server + +- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/)) + + +- the `READY` column indicates the number of containers in each pod + + (1 for most pods, but `coredns` has 3, for instance) + +--- + +## Scoping another namespace + +- We can also look at a different namespace (other than `default`) + +.exercise[ + +- List only the pods in the `kube-system` namespace: + ```bash + kubectl get pods --namespace=kube-system + kubectl get pods -n kube-system + ``` + +] + +--- + +## Namespaces and other `kubectl` commands + +- We can use `-n`/`--namespace` with almost every `kubectl` command + +- Example: + + - `kubectl create --namespace=X` to create something in namespace X + +- We can use `-A`/`--all-namespaces` with most commands that manipulate multiple objects + +- Examples: + + - `kubectl delete` can delete resources across multiple namespaces + + - `kubectl label` can add/remove/update labels across multiple namespaces + +-- + +**These commands will not work for you, as you are restricted by Role Based Authentication to only have write access inside your own namespace.** \ No newline at end of file diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md new file mode 100644 index 000000000..1726024d9 --- /dev/null +++ b/slides/pks/logistics.md @@ -0,0 +1,21 @@ +## Intros + +- This slide should be customized by the tutorial instructor(s). + +- Hello! 
We are: + + - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), Pivotal Software) + - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), Pivotal Software) + + +- The workshop will run from ... + +- There will be a lunch break at ... + + (And coffee breaks!) + +- Feel free to interrupt for questions at any time + +- *Especially when you see full screen container pictures!* + +- Live feedback, questions, help: @@CHAT@@ diff --git a/slides/pks/octant.md b/slides/pks/octant.md new file mode 100644 index 000000000..2f854fb53 --- /dev/null +++ b/slides/pks/octant.md @@ -0,0 +1,14 @@ +# Octant + +Octant is an open source tool from VMWare which is designed to be a Kubernetes workload visualization tool that runs locally and uses your Kubeconfig to connect to the Kubernetes cluster. + +Octant only ever performs list and read style requests and does not create/modify/delete resources. This makes it a much safer tool to use than the Kubernetes Dashboard. + +.exercise[ + +- Run octant and browse through your resources: + ```bash + octant + ``` + +] \ No newline at end of file diff --git a/slides/pks/ourapponkube.md b/slides/pks/ourapponkube.md new file mode 100644 index 000000000..8cf858b5b --- /dev/null +++ b/slides/pks/ourapponkube.md @@ -0,0 +1,139 @@ +# Running our application on Kubernetes + +- We can now deploy our code (as well as a redis instance) + +.exercise[ + +- Deploy `redis`: + ```bash + kubectl create deployment redis --image=redis + ``` + +- Deploy everything else: + ```bash + for SERVICE in hasher rng webui worker; do + kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG + done + ``` + +] + +--- + +## Is this working? + +- After waiting for the deployment to complete, let's look at the logs! + + (Hint: use `kubectl get deploy -w` to watch deployment events) + +.exercise[ + + + +- Look at some logs: + ```bash + kubectl logs deploy/rng + kubectl logs deploy/worker + ``` + +] + +-- + +🤔 `rng` is fine ... But not `worker`. + +-- + +💡 Oh right! We forgot to `expose`. + +--- + +## Connecting containers together + +- Three deployments need to be reachable by others: `hasher`, `redis`, `rng` + +- `worker` doesn't need to be exposed + +- `webui` will be dealt with later + +.exercise[ + +- Expose each deployment, specifying the right port: + ```bash + kubectl expose deployment redis --port 6379 + kubectl expose deployment rng --port 80 + kubectl expose deployment hasher --port 80 + ``` + +] + +--- + +## Is this working yet? + +- The `worker` has an infinite loop, that retries 10 seconds after an error + +.exercise[ + +- Stream the worker's logs: + ```bash + kubectl logs deploy/worker --follow + ``` + + (Give it about 10 seconds to recover) + + + +] + +-- + +We should now see the `worker`, well, working happily. + +--- + +## Exposing services for external access + +- Now we would like to access the Web UI + +- We will use `kubectl port-forward` because we don't want the whole world to see it. + +.exercise[ + +- Create a port forward for the Web UI: + ```bash + kubectl port-forward deploy/webui 8888:80 + ``` +- In a new terminal check you can access it: + ```bash + curl localhost:8888 + ``` +] + +-- + +The output `Found. Redirecting to /index.html` tells us the port forward worked. + +--- + +## Accessing the web UI + +- We can now access the web UI from the port-forward. But nobody else can. 
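+
+The port-forward from the previous slide needs to be running for the next step; if you stopped it, start it again (same command as before):
+
+```bash
+# Re-create the forward if needed, and leave it running while you browse
+kubectl port-forward deploy/webui 8888:80
+```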
+ +.exercise[ + +- Open the web UI in your browser (http://localhost:8888/) + + + +] + +-- + +*Alright, we're back to where we started, when we were running on a single node!* diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md new file mode 100644 index 000000000..9607699da --- /dev/null +++ b/slides/pks/prereqs.md @@ -0,0 +1,115 @@ +# Pre-requirements + +- Be comfortable with the UNIX command line + + - navigating directories + + - editing files + + - a little bit of bash-fu (environment variables, loops) + +- Some Docker knowledge + + - `docker run`, `docker ps`, `docker build` + + - ideally, you know how to write a Dockerfile and build it +
+ (even if it's a `FROM` line and a couple of `RUN` commands) + +- It's totally OK if you are not a Docker expert! + +--- + +## software pre-requirements + +- You'll need the following software installed on your local laptop: + +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +* [helm](https://helm.sh/docs/using_helm/#installing-helm) + +- Bonus tools + +* [octant](https://github.com/vmware/octant#installation) +* [stern]() +* [jq]() + +--- + +class: title + +*Tell me and I forget.* +
+*Teach me and I remember.* +
+*Involve me and I learn.* + +Misattributed to Benjamin Franklin + +[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/) + +--- + +## Hands-on sections + +- The whole workshop is hands-on + +- You are invited to reproduce all the demos + +- You will be using conference wifi and a shared kubernetes cluster. Please be kind to both. + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + +- Join the chat room: @@CHAT@@ + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person + +## shared cluster dedicated to this workshop + +- A large Pivotal Container Service (PKS) cluster deployed to Google Cloud. + +- It remain up for the duration of the workshop + +- You should have a little card with login+password+URL + +- Logging into this URL will give you a downloadable kubeconfig file. + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - kubectl + + - helm diff --git a/slides/pks/scalingdockercoins.md b/slides/pks/scalingdockercoins.md new file mode 100644 index 000000000..527058c8e --- /dev/null +++ b/slides/pks/scalingdockercoins.md @@ -0,0 +1,241 @@ +# Scaling our demo app + +- Our ultimate goal is to get more DockerCoins + + (i.e. increase the number of loops per second shown on the web UI) + +- Let's look at the architecture again: + + ![DockerCoins architecture](images/dockercoins-diagram.svg) + +- The loop is done in the worker; + perhaps we could try adding more workers? + +--- + +## Adding another worker + +- All we have to do is scale the `worker` Deployment + +.exercise[ + +- Open two new terminals to check what's going on with pods and deployments: + ```bash + kubectl get pods -w + kubectl get deployments -w + ``` + + + +- Now, create more `worker` replicas: + ```bash + kubectl scale deployment worker --replicas=2 + ``` + +] + +After a few seconds, the graph in the web UI should show up. + +--- + +## Adding more workers + +- If 2 workers give us 2x speed, what about 3 workers? + +.exercise[ + +- Scale the `worker` Deployment further: + ```bash + kubectl scale deployment worker --replicas=3 + ``` + +] + +The graph in the web UI should go up again. + +(This is looking great! We're gonna be RICH!) + +--- + +## Adding even more workers + +- Let's see if 10 workers give us 10x speed! + +.exercise[ + +- Scale the `worker` Deployment to a bigger number: + ```bash + kubectl scale deployment worker --replicas=10 + ``` + +] + +-- + +The graph will peak at 10 hashes/second. + +(We can add as many workers as we want: we will never go past 10 hashes/second.) + +--- + +class: extra-details + +## Didn't we briefly exceed 10 hashes/second? + +- It may *look like it*, because the web UI shows instant speed + +- The instant speed can briefly exceed 10 hashes/second + +- The average speed cannot + +- The instant speed can be biased because of how it's computed + +--- + +class: extra-details + +## Why instant speed is misleading + +- The instant speed is computed client-side by the web UI + +- The web UI checks the hash counter once per second +
+ (and does a classic (h2-h1)/(t2-t1) speed computation) + +- The counter is updated once per second by the workers + +- These timings are not exact +
+ (e.g. the web UI check interval is client-side JavaScript) + +- Sometimes, between two web UI counter measurements, +
+ the workers are able to update the counter *twice* + +- During that cycle, the instant speed will appear to be much bigger +
+ (but it will be compensated by lower instant speed before and after) + +--- + +## Why are we stuck at 10 hashes per second? + +- If this was high-quality, production code, we would have instrumentation + + (Datadog, Honeycomb, New Relic, statsd, Sumologic, ...) + +- It's not! + +- Perhaps we could benchmark our web services? + + (with tools like `ab`, or even simpler, `httping`) + +--- + +## Benchmarking our web services + +- We want to check `hasher` and `rng` + +- We are going to use `httping` + +- It's just like `ping`, but using HTTP `GET` requests + + (it measures how long it takes to perform one `GET` request) + +- It's used like this: + ``` + httping [-c count] http://host:port/path + ``` + +- Or even simpler: + ``` + httping ip.ad.dr.ess + ``` + +- We will use `httping` on the ClusterIP addresses of our services + +--- + +## Running a debug pod + +We don't have direct access to ClusterIP services, nor do we want to run a bunch of port-forwards. Instead we can run a Pod containing `httping` and then use `kubectl exec` to perform our debugging. + +.excercise[ + +- Run a debug pod + ```bash + kubectl run debug --image=paulczar/debug \ + --restart=Never -- sleep 6000 + ``` + +] + +-- + +This will run our debug pod which contains tools like `httping` that will self-destruct after 6000 seconds. + +--- + +### Executing a command in a running pod + +- You may have need to occasionally run a command inside a pod. Rather than trying to run `SSH` inside a container you can use the `kubectl exec` command. + +.excercise[ + + - Run curl inside your debug pod: + ```bash + kubectl exec debug -- curl -s https://google.com + ``` +] + +-- + +```html + +301 Moved +

+The document has moved +here. + +``` + +--- + +## Service Discovery + +- Each of our services has a Cluster IP which we could get using `kubectl get services` + +- Or do it programmatically, like so: + ```bash + HASHER=$(kubectl get svc hasher -o go-template={{.spec.clusterIP}}) + RNG=$(kubectl get svc rng -o go-template={{.spec.clusterIP}}) + ``` + +- However Kubernetes has an in-cluster DNS server which means if you're inside the cluster you can simple use the service name as an endpoint. + +--- + +## Checking `hasher` and `rng` response times + +.exercise[ + +- Check the response times for both services: + ```bash + kubectl exec debug -- httping -c 3 hasher + kubectl exec debug -- httping -c 3 rng + ``` + +] + +-- + +- `hasher` is fine (it should take a few milliseconds to reply) + +- `rng` is not (it should take about 700 milliseconds if there are 10 workers) + +- Something is wrong with `rng`, but ... what? diff --git a/slides/pks/setup-k8s.md b/slides/pks/setup-k8s.md new file mode 100644 index 000000000..8a6f5b875 --- /dev/null +++ b/slides/pks/setup-k8s.md @@ -0,0 +1,108 @@ +# Setting up Kubernetes + +How did we set up these Kubernetes clusters that we're using? + +-- + +- We used Pivotal Container Service (PKS) a multicloud Kubernetes broker. + +- But first we Created a GKE Kubernetes cluster + - We installed the Google Cloud Operator on GKE + - We installed PKS using the GCP Operator + - We installed this Kubernetes cluster using PKS + +--- + +# Setting up Kubernetes + +- How can I set up a basic Kubernetes lab at home? + +-- + + + +- Run `kubeadm` on freshly installed VM instances running Ubuntu LTS + + 1. Install Docker + + 2. Install Kubernetes packages + + 3. Run `kubeadm init` on the first node (it deploys the control plane on that node) + + 4. Set up Weave (the overlay network) +
+ (that step is just one `kubectl apply` command; discussed later) + + 5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`) + + 6. Copy the configuration file generated by `kubeadm init` + +- Check the [prepare VMs README](https://@@GITREPO@@/blob/master/prepare-vms/README.md) for more details + +--- + +## `kubeadm` drawbacks + +- Doesn't set up Docker or any other container engine + +- Doesn't set up the overlay network + +- Doesn't set up multi-master (no high availability) + +-- + + (At least ... not yet! Though it's [experimental in 1.12](https://kubernetes.io/docs/setup/independent/high-availability/).) + +-- + +- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme + +--- + +## Other deployment options + +- [AKS](https://azure.microsoft.com/services/kubernetes-service/): + managed Kubernetes on Azure + +- [GKE](https://cloud.google.com/kubernetes-engine/): + managed Kubernetes on Google Cloud + +- [EKS](https://aws.amazon.com/eks/), + [eksctl](https://eksctl.io/): + managed Kubernetes on AWS + +- [kops](https://github.com/kubernetes/kops): + customizable deployments on AWS, Digital Ocean, GCE (beta), vSphere (alpha) + +- [minikube](https://kubernetes.io/docs/setup/minikube/), + [kubespawn](https://github.com/kinvolk/kube-spawn), + [Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/): + for local development + +- [kubicorn](https://github.com/kubicorn/kubicorn), + the [Cluster API](https://blogs.vmware.com/cloudnative/2019/03/14/what-and-why-of-cluster-api/): + deploy your clusters declaratively, "the Kubernetes way" + +--- + +## Even more deployment options + +- If you like Ansible: + [kubespray](https://github.com/kubernetes-incubator/kubespray) + +- If you like Terraform: + [typhoon](https://github.com/poseidon/typhoon) + +- If you like Terraform and Puppet: + [tarmak](https://github.com/jetstack/tarmak) + +- You can also learn how to install every component manually, with + the excellent tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) + + *Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.* + +- There are also many commercial options available! + +- For a longer list, check the Kubernetes documentation: +
+ it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes. From f3dd189d07781af5ba43739218d3cac5932732cb Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Wed, 28 Aug 2019 10:34:52 -0500 Subject: [PATCH 02/14] scripts for preparing pks cluster --- prepare-pks/README.md | 36 ++++++++++++++++++ prepare-pks/users/create.sh | 10 +++++ prepare-pks/users/delete.sh | 10 +++++ prepare-pks/users/helm.sh | 31 +++++++++++++++ prepare-pks/users/random-users.sh | 11 ++++++ prepare-pks/users/user-role-etc.yaml | 57 ++++++++++++++++++++++++++++ prepare-pks/users/users.txt | 2 + 7 files changed, 157 insertions(+) create mode 100644 prepare-pks/README.md create mode 100755 prepare-pks/users/create.sh create mode 100755 prepare-pks/users/delete.sh create mode 100755 prepare-pks/users/helm.sh create mode 100755 prepare-pks/users/random-users.sh create mode 100644 prepare-pks/users/user-role-etc.yaml create mode 100644 prepare-pks/users/users.txt diff --git a/prepare-pks/README.md b/prepare-pks/README.md new file mode 100644 index 000000000..688c27945 --- /dev/null +++ b/prepare-pks/README.md @@ -0,0 +1,36 @@ +# Instructions for preparing a PKS Kubernetes Cluster + +## pre-reqs + +* ingress controller (nginx or nsxt) +* gangway (or similar for kubeconfig files) + +## Create users + +This example will create 50 random users in UAAC and corresponding Kubernetes users and rbac. + +```bash +$ cd users +$ ./random-users.sh 50 +... +... +$ ./create.sh +... +... +``` + +This will install helm tiller for each: + +```bash +$ ./helm.sh +... +... +``` + +This will clean up: + +```bash +$ ./delete.sh +... +... +``` diff --git a/prepare-pks/users/create.sh b/prepare-pks/users/create.sh new file mode 100755 index 000000000..98523fe61 --- /dev/null +++ b/prepare-pks/users/create.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + echo "--> Adding user $col1 with password $col2" + echo "====> UAAC" + uaac user add $col1 --emails $col1@pks -p $col2 + echo "====> Kubernetes" + cat user-role-etc.yaml | sed "s/__username__/$col1/" | kubectl apply -f - +done < users.txt diff --git a/prepare-pks/users/delete.sh b/prepare-pks/users/delete.sh new file mode 100755 index 000000000..6fff38f54 --- /dev/null +++ b/prepare-pks/users/delete.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + echo "--> Deleting user $col1 with password $col2" + echo "====> UAAC" + uaac user delete $col1 + echo "====> Kubernetes" + cat user-role-etc.yaml | sed "s/__username__/$col1/" | kubectl delete -f - +done < users.txt \ No newline at end of file diff --git a/prepare-pks/users/helm.sh b/prepare-pks/users/helm.sh new file mode 100755 index 000000000..cdda4ede1 --- /dev/null +++ b/prepare-pks/users/helm.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + +kubectl -n $col1 create serviceaccount tiller + +kubectl -n $col1 create role tiller --verb '*' --resource '*' + +kubectl -n $col1 create rolebinding tiller --role tiller --serviceaccount ${col1}:tiller + +kubectl create clusterrole ns-tiller --verb 'get,list' --resource namespaces + +kubectl create clusterrolebinding tiller --clusterrole ns-tiller --serviceaccount ${col1}:tiller + +helm init --service-account=tiller --tiller-namespace=$col1 + +kubectl -n $col1 delete service tiller-deploy + +kubectl -n $col1 patch deployment tiller-deploy --patch ' +spec: + template: + spec: + containers: + - name: tiller + ports: [] + command: ["/tiller"] + args: 
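---

## Bootstrapping with `kubeadm` (sketch)

- To make the `kubeadm` steps listed earlier more concrete, here is a rough, untested outline

  (values in angle brackets are placeholders; the package install assumes the Kubernetes apt repository is already configured on each node)

  ```bash
  # on every node: install a container engine and the Kubernetes packages
  sudo apt-get update && sudo apt-get install -y docker.io kubelet kubeadm kubectl

  # on the first node: bootstrap the control plane
  sudo kubeadm init

  # still on the first node: install the overlay network (Weave, as an example)
  kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"

  # on every other node: join the cluster with the command printed by `kubeadm init`
  sudo kubeadm join <control-plane-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
  ```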
["--listen=localhost:44134"] +' + +done < users.txt diff --git a/prepare-pks/users/random-users.sh b/prepare-pks/users/random-users.sh new file mode 100755 index 000000000..e299afc19 --- /dev/null +++ b/prepare-pks/users/random-users.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [[ -z $1 ]]; then + echo "Usage: ./random-names.sh 55" + exit 1 +fi + +for i in {1..50}; do + PW=`cat /dev/urandom | tr -dc 'a-zA-Z1-9' | fold -w 10 | head -n 1` + echo "user$i,$PW" +done diff --git a/prepare-pks/users/user-role-etc.yaml b/prepare-pks/users/user-role-etc.yaml new file mode 100644 index 000000000..0b5dc75cc --- /dev/null +++ b/prepare-pks/users/user-role-etc.yaml @@ -0,0 +1,57 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: __username__ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbac-user-namespace +rules: +- apiGroups: ["", "extensions", "apps", "batch", "autoscaling","networking.k8s.io"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbac-user-cluster +rules: +- apiGroups: ["", "extensions", "apps"] + resources: ["*"] + verbs: ["list"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["list","get"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["*"] + verbs: ["list"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __username__ + namespace: __username__ +subjects: +- kind: User + name: __username__ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: rbac-user-namespace + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __username__ + namespace: __username__ +subjects: +- kind: User + name: __username__ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: rbac-user-cluster + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/prepare-pks/users/users.txt b/prepare-pks/users/users.txt new file mode 100644 index 000000000..450cdc67d --- /dev/null +++ b/prepare-pks/users/users.txt @@ -0,0 +1,2 @@ +user1,user1-password +user2,user2-password From 4d35b81a83c1b0bcf09d91b0639efc8aa69aa4dd Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Thu, 29 Aug 2019 16:45:57 -0500 Subject: [PATCH 03/14] helm for pks --- slides/kube-fullday-namespaced.yml | 1 + slides/pks/helm-wordpress.md | 109 +++++++++++++++++++++++++++++ slides/pks/helm.md | 17 ++--- slides/pks/wp/values.yaml | 18 +++++ 4 files changed, 137 insertions(+), 8 deletions(-) create mode 100644 slides/pks/helm-wordpress.md create mode 100644 slides/pks/wp/values.yaml diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index f71db8513..d7fa9fe62 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -85,6 +85,7 @@ chapters: #- k8s/owners-and-dependents.md #- k8s/gitworkflows.md - pks/helm.md + - pks/helm-wordpress.md - - k8s/whatsnext.md - k8s/links.md diff --git a/slides/pks/helm-wordpress.md b/slides/pks/helm-wordpress.md new file mode 100644 index 000000000..882a68da3 --- /dev/null +++ b/slides/pks/helm-wordpress.md @@ -0,0 +1,109 @@ +## Why wordpress its 2019?!?! + +I know ... funny right :) + +--- + +## Helm install notes + +- You'll notice a helpful message after running `helm install` that looks something like this: + +``` +NOTES: +1. 
Get the WordPress URL: + + echo "WordPress URL: http://127.0.0.1:8080/" + echo "WordPress Admin URL: http://127.0.0.1:8080/admin" + kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 + +2. Login with the following credentials to see your blog + + echo Username: user + echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) +``` + +-- + +Helm charts generally have a `NOTES.txt` template that is rendered out and displayed after helm commands are run. Pretty neat. + +--- + +## What did helm install ? + +- Run `kubectl get all` to check what resources helm installed + +.exercise[ + - Run `kubectl get all`: + ```bash + kubectl get all + ``` + +] +--- + +## What did helm install ? + +``` +NAME READY STATUS RESTARTS AGE +pod/wp-mariadb-0 1/1 Running 0 11m +pod/wp-wordpress-6cb9cfc94-chbr6 1/1 Running 0 11m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/wp-mariadb ClusterIP 10.100.200.87 3306/TCP 11m +service/wp-wordpress ClusterIP 10.100.200.131 80/TCP,443/TCP 11m + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/wp-wordpress 1/1 1 1 11m + +NAME DESIRED CURRENT READY AGE +replicaset.apps/tiller-deploy-6487f7bfd8 1 1 1 2d6h +replicaset.apps/tiller-deploy-75ccf68856 0 0 0 2d6h +replicaset.apps/wp-wordpress-6cb9cfc94 1 1 1 11m + +NAME READY AGE +statefulset.apps/wp-mariadb 1/1 11m + +``` + +--- + +## Check if wordpress is working + +- Using the notes provided from helm check you can access your wordpress and login as `user` + +.exercise[ + - run the commands provided by the helm summary: + ```bash + echo Username: user + echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) + + kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 + ``` +] + +-- + +Yay? you have a 2003 era blog + +--- + +## Helm Chart Values + +Settings values on the command line is okay for a demonstration, but we should really be creating a `~/workshop/values.yaml` file for our chart. Let's do that now. + +> the values file is a bit long to copy/paste from here, so lets wget it. + +.exercise[ + - Download the values.yaml file and edit it, changing the URL prefix to be `-wp`: + ```bash + wget -O ~/workshop/values.yaml \ + https://raw.githubusercontent.com/paulczar/container.training/pks/slides/pks/wp/values.yaml + + vim ~/workshop/values.yaml + + helm upgrade wp stable/wordpress -f ~/workshop/values.yaml + + ``` +] + +--- \ No newline at end of file diff --git a/slides/pks/helm.md b/slides/pks/helm.md index 6cab8141e..2ad003e5f 100644 --- a/slides/pks/helm.md +++ b/slides/pks/helm.md @@ -154,6 +154,7 @@ fine for personal and development clusters.) - Add the stable repo ```bash helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + helm repo update ``` ] @@ -165,16 +166,16 @@ fine for personal and development clusters.) - Most charts require persistent volumes to store data -- We need to relax these requirements a bit +- We can relax these requirements a bit .exercise[ -- Install the Prometheus metrics collector on our cluster: +- Install on our cluster: ```bash - helm install stable/prometheus \ - prometheus \ - --set server.service.type=ClusterIP \ - --set server.persistentVolume.enabled=false + helm install wp stable/wordpress \ + --set service.type=ClusterIP \ + --set persistence.enabled=false \ + --set mariadb.master.persistence.enabled=false ``` ] @@ -189,9 +190,9 @@ Where do these `--set` options come from? 
.exercise[ -- See the metadata and all available options for `stable/prometheus`: +- See the metadata and all available options for `stable/wordpress`: ```bash - helm inspect stable/prometheus + helm inspect stable/wordpress ``` ] diff --git a/slides/pks/wp/values.yaml b/slides/pks/wp/values.yaml new file mode 100644 index 000000000..cbc37988b --- /dev/null +++ b/slides/pks/wp/values.yaml @@ -0,0 +1,18 @@ +service: + type: ClusterIP +persistence: + enabled: false +mariadb: + master: + persistence: + enabled: false +ingress: + enabled: true + certManager: true + hosts: + - name: user1-wp.ingress.workshop.paulczar.wtf + path: / + tls: + - hosts: + - user1-wp.ingress.workshop.paulczar.wtf + secretName: wordpress-tls \ No newline at end of file From af0e2f4dcd1244baa3372a06120e8af3c0ed21d2 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 3 Sep 2019 16:10:09 -0500 Subject: [PATCH 04/14] helm prereqs --- slides/pks/helm.md | 2 +- slides/pks/prereqs.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/slides/pks/helm.md b/slides/pks/helm.md index 2ad003e5f..51ab4e951 100644 --- a/slides/pks/helm.md +++ b/slides/pks/helm.md @@ -177,9 +177,9 @@ fine for personal and development clusters.) --set persistence.enabled=false \ --set mariadb.master.persistence.enabled=false ``` - ] + Where do these `--set` options come from? --- diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md index 9607699da..deff59632 100644 --- a/slides/pks/prereqs.md +++ b/slides/pks/prereqs.md @@ -30,8 +30,8 @@ - Bonus tools * [octant](https://github.com/vmware/octant#installation) -* [stern]() -* [jq]() +* [stern](https://github.com/wercker/stern/releases/tag/1.11.0) +* [jq](https://stedolan.github.io/jq/download/) --- From 8b24ded563515cb2bd0dd765941cfd2ec822e7e1 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 11 Feb 2020 12:30:18 -0600 Subject: [PATCH 05/14] update to pks docs --- slides/kube-fullday-namespaced.yml | 9 ++++----- slides/pks/connecting.md | 2 +- slides/pks/logistics.md | 10 ---------- slides/pks/prereqs.md | 2 +- slides/pks/title.md | 23 +++++++++++++++++++++++ slides/shared/about-slides.md | 2 +- 6 files changed, 30 insertions(+), 18 deletions(-) create mode 100644 slides/pks/title.md diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index d7fa9fe62..7570699af 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -1,20 +1,19 @@ title: | - Deploying and Scaling Microservices - with Kubernetes + Config Management Camp - Kubernetes Workshop #chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" #chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" chat: "In person!" 
-gitrepo: github.com/jpetazzo/container.training +gitrepo: github.com/paulczar/container.training -slides: http://container.training/ +slides: https://k8s.camp/cfgcamp/ exclude: - self-paced chapters: -- shared/title.md +- pks/title.md - pks/logistics.md - k8s/intro.md - shared/about-slides.md diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md index 89d32f799..1ee08026d 100644 --- a/slides/pks/connecting.md +++ b/slides/pks/connecting.md @@ -4,7 +4,7 @@ class: in-person .exercise[ -- Log into https://workshop.paulczar.wtf with your provided credentials +- Log into https://gangway.workshop.paulczar.wtf with your provided credentials (sorry about the [self-signed cert](https://gist.githubusercontent.com/paulczar/6e3f48a03e544627952aaa399a29a4af/raw/9e530371d8929ab573a205238dd0f2c718edc64c/ca.cert)) - Follow the instructions on the auth portal to set up a `kubeconfig` file. diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md index 1726024d9..e8071a57b 100644 --- a/slides/pks/logistics.md +++ b/slides/pks/logistics.md @@ -1,21 +1,11 @@ ## Intros -- This slide should be customized by the tutorial instructor(s). - - Hello! We are: - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), Pivotal Software) - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), Pivotal Software) -- The workshop will run from ... - -- There will be a lunch break at ... - - (And coffee breaks!) - - Feel free to interrupt for questions at any time - *Especially when you see full screen container pictures!* - -- Live feedback, questions, help: @@CHAT@@ diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md index deff59632..a969f44b5 100644 --- a/slides/pks/prereqs.md +++ b/slides/pks/prereqs.md @@ -25,7 +25,7 @@ - You'll need the following software installed on your local laptop: * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -* [helm](https://helm.sh/docs/using_helm/#installing-helm) +* [helm 3](https://helm.sh/docs/using_helm/#installing-helm) - Bonus tools diff --git a/slides/pks/title.md b/slides/pks/title.md new file mode 100644 index 000000000..568203a94 --- /dev/null +++ b/slides/pks/title.md @@ -0,0 +1,23 @@ +class: title, self-paced + +@@TITLE@@ + +.nav[*Self-paced version*] + +--- + +class: title, in-person + +@@TITLE@@

+ +.footnote[ +**Be kind to the WiFi!**
+ +*Don't use your hotspot.*
+*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*
+*Thank you!* + +**Slides: @@SLIDES@@**
+**Credentials: https://tinyurl.com/k8scamp**
+**Login: https://gangway.workshop.demo.paulczar.wtf** +] diff --git a/slides/shared/about-slides.md b/slides/shared/about-slides.md index bef7a2bcc..498f09f39 100644 --- a/slides/shared/about-slides.md +++ b/slides/shared/about-slides.md @@ -6,7 +6,7 @@ - You can get updated "builds" of the slides there: - http://container.training/ + @@SLIDES@@ ] + +--- + +## Cleanup ping pong deployment + +- Time to clean up pingpong and move on + +.exercise[ + + - delete the pingpong deployment + ```bash + kubectl delete deployment pingpong + ``` +] \ No newline at end of file diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml new file mode 100644 index 000000000..f71db8513 --- /dev/null +++ b/slides/kube-fullday-namespaced.yml @@ -0,0 +1,91 @@ +title: | + Deploying and Scaling Microservices + with Kubernetes + +#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" +#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" +chat: "In person!" + +gitrepo: github.com/jpetazzo/container.training + +slides: http://container.training/ + +exclude: +- self-paced + +chapters: +- shared/title.md +- pks/logistics.md +- k8s/intro.md +- shared/about-slides.md +- shared/toc.md +- + - pks/prereqs.md + #- shared/webssh.md + - pks/connecting.md + # - k8s/versions-k8s.md + #- shared/sampleapp.md + #- shared/composescale.md + #- shared/hastyconclusions.md + #- shared/composedown.md + - pks/concepts-k8s.md + - pks/kubectlget.md +- + - k8s/kubectlrun.md + - k8s/logs-cli.md + - shared/declarative.md + - k8s/declarative.md + - k8s/deploymentslideshow.md + - k8s/kubenet.md + - pks/kubectlexpose.md + - k8s/shippingimages.md + #- k8s/buildshiprun-selfhosted.md + - k8s/buildshiprun-dockerhub.md + - pks/ourapponkube.md + #- k8s/kubectlproxy.md + #- k8s/localkubeconfig.md + #- k8s/accessinternal.md +- + - pks/setup-k8s.md + - pks/dashboard.md + - pks/octant.md + #- k8s/kubectlscale.md + - pks/scalingdockercoins.md + - shared/hastyconclusions.md + - k8s/daemonset.md + - k8s/rollout.md + #- k8s/healthchecks.md + #- k8s/healthchecks-more.md + #- k8s/record.md +- + #- k8s/namespaces.md + - pks/ingress.md + - pks/cleanup-dockercoins.md + #- k8s/kustomize.md + #- k8s/helm.md + #- k8s/create-chart.md + #- k8s/netpol.md + #- k8s/authn-authz.md + #- k8s/csr-api.md + #- k8s/openid-connect.md + #- k8s/podsecuritypolicy.md + - k8s/volumes.md + #- k8s/build-with-docker.md + #- k8s/build-with-kaniko.md + - k8s/configuration.md + #- k8s/logs-centralized.md + #- k8s/prometheus.md + #- k8s/statefulsets.md + #- k8s/local-persistent-volumes.md + #- k8s/portworx.md + #- k8s/extending-api.md + #- k8s/operators.md + #- k8s/operators-design.md + #- k8s/staticpods.md + #- k8s/owners-and-dependents.md + #- k8s/gitworkflows.md + - pks/helm.md +- + - k8s/whatsnext.md + - k8s/links.md + - shared/thankyou.md diff --git a/slides/pks/cleanup-dockercoins.md b/slides/pks/cleanup-dockercoins.md new file mode 100644 index 000000000..8fa5bce1a --- /dev/null +++ b/slides/pks/cleanup-dockercoins.md @@ -0,0 +1,12 @@ +# Let's do some housekeeping + +- We've created a lot of resources, let's clean them up. 
+ +.exercise[ + - Delete resources: + ```bash + kubectl delete deployment,svc hasher redis rng webui + kubectl delete deployment worker + kubectl delete ingress webui + kubectl delete daemonset rng +] diff --git a/slides/pks/concepts-k8s.md b/slides/pks/concepts-k8s.md new file mode 100644 index 000000000..edb6f1380 --- /dev/null +++ b/slides/pks/concepts-k8s.md @@ -0,0 +1,257 @@ +# Kubernetes concepts + +- Kubernetes is a container management system + +- It runs and manages containerized applications on a cluster + +-- + +- What does that really mean? + +--- + +## Basic things we can ask Kubernetes to do + +-- + +- Start 5 containers using image `atseashop/api:v1.3` + +-- + +- Place an internal load balancer in front of these containers + +-- + +- Start 10 containers using image `atseashop/webfront:v1.3` + +-- + +- Place a public load balancer in front of these containers + +-- + +- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers + +-- + +- New release! Replace my containers with the new image `atseashop/webfront:v1.4` + +-- + +- Keep processing requests during the upgrade; update my containers one at a time + +--- + +## Other things that Kubernetes can do for us + +- Basic autoscaling + +- Blue/green deployment, canary deployment + +- Long running services, but also batch (one-off) jobs + +- Overcommit our cluster and *evict* low-priority jobs + +- Run services with *stateful* data (databases etc.) + +- Fine-grained access control defining *what* can be done by *whom* on *which* resources + +- Integrating third party services (*service catalog*) + +- Automating complex tasks (*operators*) + +--- + +## Kubernetes architecture + +--- + +class: pic + +![haha only kidding](images/k8s-arch1.png) + +--- + +## Kubernetes architecture + +- Ha ha ha ha + +- OK, I was trying to scare you, it's much simpler than that ❤️ + +--- + +class: pic + +![that one is more like the real thing](images/k8s-arch2.png) + +--- + +## Credits + +- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI + + (Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/)) + +- The second one is a simplified representation of a Kubernetes cluster + + (Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e)) + +--- + +## Kubernetes architecture: the data plane + +- The data plane is a collection of nodes that execute our containers + +- These nodes run a collection of services: + + - a container Engine (typically Docker) + + - kubelet (the "node agent") + + - kube-proxy (a necessary but not sufficient network component) + +- Nodes were formerly called "minions" + + (You might see that word in older articles or documentation) + +--- + +## Kubernetes architecture: the control plane + +- The Kubernetes logic (its "brains") is a collection of services: + + - the API server (our point of entry to everything!) 
+ + - core services like the scheduler and controller manager + + - `etcd` (a highly available key/value store; the "database" of Kubernetes) + +- Together, these services form the control plane of our cluster + +- The control plane is also called the "master" + +--- + +class: pic + +![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png) + +--- + +class: extra-details + +## Running the control plane on special nodes + +- PKS reserves dedicated node[s] for the control plane + +- This node is then called a "master" + + (Yes, this is ambiguous: is the "master" a node, or the whole control plane?) + +- Normal applications are restricted from running on this node + +- When high availability is required, each service of the control plane must be resilient + +- The control plane is then replicated on multiple nodes + + (This is sometimes called a "multi-master" setup) + +--- + +class: extra-details + +## Do we need to run Docker at all? + +No! + +-- + +- By default, Kubernetes uses the Docker Engine to run containers + +- We could also use `rkt` ("Rocket") from CoreOS + +- Or leverage other pluggable runtimes through the *Container Runtime Interface* + + (like CRI-O, or containerd) + +--- + +class: extra-details + +## Do we need to run Docker at all? + +Yes! + +-- + +- Our Kubernetes cluster is using Docker as the container engine + +- We still use it to build images and ship them around + +- We can do these things without Docker +
+ (and get diagnosed with NIH¹ syndrome) + +- Docker is still the most stable container engine today +
+ (but other options are maturing very quickly) + +.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)] + +--- + +class: extra-details + +## Do we need to run Docker at all? + +- On our development environments, CI pipelines ... : + + *Yes, almost certainly* + +- On our production servers: + + *Yes (today)* + + *Probably not (in the future)* + +.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)] + +--- + +## Interacting with Kubernetes + +- We will interact with our Kubernetes cluster through the Kubernetes API + +- The Kubernetes API is (mostly) RESTful + +- It allows us to create, read, update, delete *resources* + +- A few common resource types are: + + - node (a machine — physical or virtual — in our cluster) + + - pod (group of containers running together on a node) + + - service (stable network endpoint to connect to one or multiple containers) + +--- + +class: pic + +![Node, pod, container](images/k8s-arch3-thanks-weave.png) + +--- + +## Credits + +- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha) + + - it's one of the best Kubernetes architecture diagrams available! + +- The second diagram is courtesy of Weave Works + + - a *pod* can have multiple containers working together + + - IP addresses are associated with *pods*, not with individual containers + +Both diagrams used with permission. diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md new file mode 100644 index 000000000..89d32f799 --- /dev/null +++ b/slides/pks/connecting.md @@ -0,0 +1,84 @@ +class: in-person + +## Connecting to our lab environment + +.exercise[ + +- Log into https://workshop.paulczar.wtf with your provided credentials + +- Follow the instructions on the auth portal to set up a `kubeconfig` file. + +- Check that you can connect to the cluster with `kubectl get nodes`: + +```bash +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +vm-0f2b473c-5ae6-4af3-4e80-f0a068b03abe Ready 23h v1.14.5 +vm-25cfc8d6-88c0-45f6-4305-05e859af7f2c Ready 23h v1.14.5 +... +... +``` +] + +If anything goes wrong — ask for help! + +--- + +## Doing or re-doing the workshop on your own? + +- Use something like + [Play-With-Docker](http://play-with-docker.com/) or + [Play-With-Kubernetes](https://training.play-with-kubernetes.com/) + + Zero setup effort; but environment are short-lived and + might have limited resources + +- Create your own cluster (local or cloud VMs) + + Small setup effort; small cost; flexible environments + +- Create a bunch of clusters for you and your friends + ([instructions](https://@@GITREPO@@/tree/master/prepare-vms)) + + Bigger setup effort; ideal for group training + +--- + +class: self-paced + +## Get your own Docker nodes + +- If you already have some Docker nodes: great! + +- If not: let's get some thanks to Play-With-Docker + +.exercise[ + +- Go to http://www.play-with-docker.com/ + +- Log in + +- Create your first node + + + +] + +You will need a Docker ID to use Play-With-Docker. + +(Creating a Docker ID is free.) + +--- + +## Terminals + +Once in a while, the instructions will say: +
"Open a new terminal." + +There are multiple ways to do this: + +- create a new window or tab on your machine, and SSH into the VM; + +- use screen or tmux on the VM and open a new window from there. + +You are welcome to use the method that you feel the most comfortable with. diff --git a/slides/pks/dashboard.md b/slides/pks/dashboard.md new file mode 100644 index 000000000..18cca1d72 --- /dev/null +++ b/slides/pks/dashboard.md @@ -0,0 +1,166 @@ +# The Kubernetes dashboard + +- Kubernetes resources can also be viewed with a web dashboard + +- That dashboard is usually exposed over HTTPS + + (this requires obtaining a proper TLS certificate) + +- Dashboard users need to authenticate + +- Most people just YOLO it into their cluster and then get hacked + +--- + +## Stop the madness + +You know what, this is all a very bad idea. Let's not run the Kubernetes dashboard at all ... ever. + +The following slides are informational. Do not run them. + +--- + +## The insecure method + +- We could (and should) use [Let's Encrypt](https://letsencrypt.org/) ... + +- ... but we don't want to deal with TLS certificates + +- We could (and should) learn how authentication and authorization work ... + +- ... but we will use a guest account with admin access instead + +.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]] + +--- + +## Running a very insecure dashboard + +- We are going to deploy that dashboard with *one single command* + +- This command will create all the necessary resources + + (the dashboard itself, the HTTP wrapper, the admin/guest account) + +- All these resources are defined in a YAML file + +- All we have to do is load that YAML file with with `kubectl apply -f` + +.exercise[ + +- Create all the dashboard resources, with the following command: + ```bash + kubectl apply -f ~/container.training/k8s/insecure-dashboard.yaml + ``` + +] + +--- + +## Connecting to the dashboard + +.exercise[ + +- Check which port the dashboard is on: + ```bash + kubectl get svc dashboard + ``` + +] + +You'll want the `3xxxx` port. + + +.exercise[ + +- Connect to http://oneofournodes:3xxxx/ + + + +] + +The dashboard will then ask you which authentication you want to use. + +--- + +## Dashboard authentication + +- We have three authentication options at this point: + + - token (associated with a role that has appropriate permissions) + + - kubeconfig (e.g. using the `~/.kube/config` file from `node1`) + + - "skip" (use the dashboard "service account") + +- Let's use "skip": we're logged in! + +-- + +.warning[By the way, we just added a backdoor to our Kubernetes cluster!] + +--- + +## Running the Kubernetes dashboard securely + +- The steps that we just showed you are *for educational purposes only!* + +- If you do that on your production cluster, people [can and will abuse it](https://redlock.io/blog/cryptojacking-tesla) + +- For an in-depth discussion about securing the dashboard, +
+ check [this excellent post on Heptio's blog](https://blog.heptio.com/on-securing-the-kubernetes-dashboard-16b09b1b7aca) + +--- + +# Security implications of `kubectl apply` + +- When we do `kubectl apply -f `, we create arbitrary resources + +- Resources can be evil; imagine a `deployment` that ... + +-- + + - starts bitcoin miners on the whole cluster + +-- + + - hides in a non-default namespace + +-- + + - bind-mounts our nodes' filesystem + +-- + + - inserts SSH keys in the root account (on the node) + +-- + + - encrypts our data and ransoms it + +-- + + - ☠️☠️☠️ + +--- + +## `kubectl apply` is the new `curl | sh` + +- `curl | sh` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +-- + +- `kubectl apply -f` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +- Example: the official setup instructions for most pod networks + +-- + +- It introduces new failure modes + + (for instance, if you try to apply YAML from a link that's no longer valid) diff --git a/slides/pks/helm.md b/slides/pks/helm.md new file mode 100644 index 000000000..6cab8141e --- /dev/null +++ b/slides/pks/helm.md @@ -0,0 +1,216 @@ +# Managing stacks with Helm + +- We created our first resources with `kubectl run`, `kubectl expose` ... + +- We have also created resources by loading YAML files with `kubectl apply -f` + +- For larger stacks, managing thousands of lines of YAML is unreasonable + +- These YAML bundles need to be customized with variable parameters + + (E.g.: number of replicas, image version to use ...) + +- It would be nice to have an organized, versioned collection of bundles + +- It would be nice to be able to upgrade/rollback these bundles carefully + +- [Helm](https://helm.sh/) is an open source project offering all these things! + +--- + +## Helm concepts + +- `helm` is a CLI tool + +- `tiller` is its companion server-side component + +- A "chart" is an archive containing templatized YAML bundles + +- Charts are versioned + +- Charts can be stored on private or public repositories + +-- + +*We're going to use the beta of Helm 3 as it does not require `tiller` making things simpler and more secure for us.* +--- + +## Installing Helm + +- If the `helm` 3 CLI is not installed in your environment, [install it](https://github.com/helm/helm/releases/tag/v3.0.0-beta.1) + +.exercise[ + +- Check if `helm` is installed: + ```bash + helm version + ``` +] + +-- + +```bash +version.BuildInfo{Version:"v3.0.0-beta.1", GitCommit:"f76b5f21adb53a85de8925f4a9d4f9bd99f185b5", GitTreeState:"clean", GoVersion:"go1.12.9"}` +``` + +--- + +## Oops you accidently a Helm 2 + +If `helm version` gives you a result like below it means you have helm 2 which requires the `tiller` server side component. + +``` +Client: &version.Version{SemVer:"v2.14.0", GitCommit:"05811b84a3f93603dd6c2fcfe57944dfa7ab7fd0", GitTreeState:"clean"} +Error: forwarding ports: error upgrading connection: pods "tiller-deploy-6fd87785-x8sxk" is forbidden: User "user1" cannot create resource "pods/portforward" in API group "" in the namespace "kube-system" +``` + +Run `EXPORT TILLER_NAMESPACE=` and try again. We've pre-installed `tiller` for you in your namespace just in case. + +-- + +Some of the commands in the following may not work in helm 2. Good luck! + +--- + +## Installing Tiller + +*If you were running Helm 2 you would need to install Tiller. We can skip this.* + +- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace + +- They can be managed (installed, upgraded...) 
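
- As an illustration only (not something to run in this workshop), the usual alternative to "skip" is to log in with the token of a dedicated, view-only service account; a minimal sketch using the built-in `view` cluster role (the `dashboard-viewer` name is made up):

  ```bash
  # create a service account and give it read-only access via the built-in "view" role
  kubectl --namespace kube-system create serviceaccount dashboard-viewer
  kubectl create clusterrolebinding dashboard-viewer \
          --clusterrole=view --serviceaccount=kube-system:dashboard-viewer
  # show its auto-generated token, which can be pasted into the dashboard login screen
  kubectl --namespace kube-system describe secret dashboard-viewer-token
  ```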
with the `helm` CLI + +.exercise[ + +- Deploy Tiller: + ```bash + helm init + ``` + +] + +If Tiller was already installed, don't worry: this won't break it. + +At the end of the install process, you will see: + +``` +Happy Helming! +``` + +--- + +## Fix account permissions + +*If you were running Helm 2 you would need to install Tiller. We can skip this.* + +- Helm permission model requires us to tweak permissions + +- In a more realistic deployment, you might create per-user or per-team + service accounts, roles, and role bindings + +.exercise[ + +- Grant `cluster-admin` role to `kube-system:default` service account: + ```bash + kubectl create clusterrolebinding add-on-cluster-admin \ + --clusterrole=cluster-admin --serviceaccount=kube-system:default + ``` + +] + +(Defining the exact roles and permissions on your cluster requires +a deeper knowledge of Kubernetes' RBAC model. The command above is +fine for personal and development clusters.) + +--- + +## View available charts + +- A public repo is pre-configured when installing Helm + +- We can view available charts with `helm search` (and an optional keyword) + +.exercise[ + +- View all available charts: + ```bash + helm search hub + ``` + +- View charts related to `prometheus`: + ```bash + helm search hub prometheus + ``` + +] + +--- + +## Add the stable chart repository + +- Helm 3 does not come configured with any repositories, so we need to start by adding the stable repo. + +.exercise[ + - Add the stable repo + ```bash + helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + ``` +] + +--- + +## Install a chart + +- Most charts use `LoadBalancer` service types by default + +- Most charts require persistent volumes to store data + +- We need to relax these requirements a bit + +.exercise[ + +- Install the Prometheus metrics collector on our cluster: + ```bash + helm install stable/prometheus \ + prometheus \ + --set server.service.type=ClusterIP \ + --set server.persistentVolume.enabled=false + ``` + +] + +Where do these `--set` options come from? + +--- + +## Inspecting a chart + +- `helm inspect` shows details about a chart (including available options) + +.exercise[ + +- See the metadata and all available options for `stable/prometheus`: + ```bash + helm inspect stable/prometheus + ``` + +] + +The chart's metadata includes a URL to the project's home page. + +(Sometimes it conveniently points to the documentation for the chart.) + +--- + +## Viewing installed charts + +- Helm keeps track of what we've installed + +.exercise[ + +- List installed Helm charts: + ```bash + helm list + ``` + +] diff --git a/slides/pks/ingress.md b/slides/pks/ingress.md new file mode 100644 index 000000000..b296ccfd7 --- /dev/null +++ b/slides/pks/ingress.md @@ -0,0 +1,247 @@ +# Exposing HTTP services with Ingress resources + +- *Services* give us a way to access a pod or a set of pods + +- Services can be exposed to the outside world: + + - with type `NodePort` (on a port >30000) + + - with type `LoadBalancer` (allocating an external load balancer) + +- What about HTTP services? + + - how can we expose `webui`, `rng`, `hasher`? + + - the Kubernetes dashboard? + + - a new version of `webui`? + +--- + +## Exposing HTTP services + +- If we use `NodePort` services, clients have to specify port numbers + + (i.e. http://xxxxx:31234 instead of just http://xxxxx) + +- `LoadBalancer` services are nice, but: + + - they are not available in all environments + + - they often carry an additional cost (e.g. 
they provision an ELB) + + - they require one extra step for DNS integration +
+ (waiting for the `LoadBalancer` to be provisioned; then adding it to DNS) + +- We could build our own reverse proxy + +--- + +## Building a custom reverse proxy + +- There are many options available: + + Apache, HAProxy, Hipache, NGINX, Traefik, ... + + (look at [jpetazzo/aiguillage](https://github.com/jpetazzo/aiguillage) for a minimal reverse proxy configuration using NGINX) + +- Most of these options require us to update/edit configuration files after each change + +- Some of them can pick up virtual hosts and backends from a configuration store + +- Wouldn't it be nice if this configuration could be managed with the Kubernetes API? + +-- + +- Enter.red[¹] *Ingress* resources! + +.footnote[.red[¹] Pun maybe intended.] + +--- + +## Ingress resources + +- Kubernetes API resource (`kubectl get ingress`/`ingresses`/`ing`) + +- Designed to expose HTTP services + +- Basic features: + + - load balancing + - SSL termination + - name-based virtual hosting + +- Can also route to different services depending on: + + - URI path (e.g. `/api`→`api-service`, `/static`→`assets-service`) + - Client headers, including cookies (for A/B testing, canary deployment...) + - and more! + +--- + +## Principle of operation + +- Step 1: deploy an *ingress controller* + + - ingress controller = load balancer + control loop + + - the control loop watches over ingress resources, and configures the LB accordingly + +- Step 2: set up DNS + + - associate DNS entries with the load balancer address + +- Step 3: create *ingress resources* + + - the ingress controller picks up these resources and configures the LB + +- Step 4: profit! + +--- + +## Ingress in action + +- We already have an nginx-ingress controller deployed + +- For DNS, we have a wildcard set up pointing at our ingress LB + + - `*.ingress.workshop.paulczar.wtf` + +- We will create ingress resources for various HTTP services + +--- + +## Checking that nginx-ingress runs correctly + +- If Traefik started correctly, we now have a web server listening on each node + +.exercise[ + +- Check that nginx is serving 80/tcp: + ```bash + curl test.ingress.workshop.paulczar.wtf + ``` + +] + +We should get a `404 page not found` error. + +This is normal: we haven't provided any ingress rule yet. + +--- + +## Expose that webui + +- Before we can enable the ingress, we need to create a service for the webui + +.exercise[ + + - create a service for the webui deployment + ```bash + kubectl expose deployment webui --port 80 + ``` + +] + +--- + + +## Setting up host-based routing ingress rules + +- We are going to create an ingress rule for our webui + +.exercise[ + - Write this to `~/workshop/ingress.yaml` and change the host prefix +] + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: webui +spec: + rules: + - host: user1.ingress.workshop.paulczar.wtf + http: + paths: + - path: / + backend: + serviceName: webui + servicePort: 80 +``` + +--- + +## Creating our ingress resources + +.exercise[ + - Apply the ingress manifest + ```bash + kubectl apply -f ~/workshop/ingress.yaml + ``` +] + +-- + +```bash +$ curl user1.ingress.workshop.paulczar.wtf +Found. Redirecting to /index.html +``` + + +--- + +## Using multiple ingress controllers + +- You can have multiple ingress controllers active simultaneously + + (e.g. Traefik and NGINX) + +- You can even have multiple instances of the same controller + + (e.g. 
one for internal, another for external traffic) + +- The `kubernetes.io/ingress.class` annotation can be used to tell which one to use + +- It's OK if multiple ingress controllers configure the same resource + + (it just means that the service will be accessible through multiple paths) + +--- + +## Ingress: the good + +- The traffic flows directly from the ingress load balancer to the backends + + - it doesn't need to go through the `ClusterIP` + + - in fact, we don't even need a `ClusterIP` (we can use a headless service) + +- The load balancer can be outside of Kubernetes + + (as long as it has access to the cluster subnet) + +- This allows the use of external (hardware, physical machines...) load balancers + +- Annotations can encode special features + + (rate-limiting, A/B testing, session stickiness, etc.) + +--- + +## Ingress: the bad + +- Aforementioned "special features" are not standardized yet + +- Some controllers will support them; some won't + +- Even relatively common features (stripping a path prefix) can differ: + + - [traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip](https://docs.traefik.io/user-guide/kubernetes/#path-based-routing) + + - [ingress.kubernetes.io/rewrite-target: /](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/rewrite) + +- This should eventually stabilize + + (remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`) diff --git a/slides/pks/kubectlexpose.md b/slides/pks/kubectlexpose.md new file mode 100644 index 000000000..38b762e3f --- /dev/null +++ b/slides/pks/kubectlexpose.md @@ -0,0 +1,363 @@ +# Exposing containers + +- `kubectl expose` creates a *service* for existing pods + +- A *service* is a stable address for a pod (or a bunch of pods) + +- If we want to connect to our pod(s), we need to create a *service* + +- Once a service is created, CoreDNS will allow us to resolve it by name + + (i.e. after creating service `hello`, the name `hello` will resolve to something) + +- There are different types of services, detailed on the following slides: + + `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName` + +--- + +## Basic service types + +- `ClusterIP` (default type) + + - a virtual IP address is allocated for the service (in an internal, private range) + - this IP address is reachable only from within the cluster (nodes and pods) + - our code can connect to the service using the original port number + +- `NodePort` + + - a port is allocated for the service (by default, in the 30000-32768 range) + - that port is made available *on all our nodes* and anybody can connect to it + - our code must be changed to connect to that new port number + +These service types are always available. + +Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules. + +--- + +## More service types + +- `LoadBalancer` + + - an external load balancer is allocated for the service + - the load balancer is configured accordingly +
(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port) + - available only when the underlying infrastructure provides some "load balancer as a service" +
(e.g. AWS, Azure, GCE, OpenStack...) + +- `ExternalName` + + - the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record + - no port, no IP address, no nothing else is allocated + +--- + +## Running containers with open ports + +- Since `ping` doesn't have anything to connect to, we'll have to run something else + +- We could use the `nginx` official image, but ... + + ... we wouldn't be able to tell the backends from each other! + +- We are going to use `jpetazzo/httpenv`, a tiny HTTP server written in Go + +- `jpetazzo/httpenv` listens on port 8888 + +- It serves its environment variables in JSON format + +- The environment variables will include `HOSTNAME`, which will be the pod name + + (and therefore, will be different on each backend) + +--- + +## Creating a deployment for our HTTP server + +- We *could* do `kubectl run httpenv --image=jpetazzo/httpenv` ... + +- But since `kubectl run` is being deprecated, let's see how to use `kubectl create` instead + +.exercise[ + +- In another window, watch the pods (to see when they are created): + ```bash + kubectl get pods -w + ``` + + + +- Create a deployment for this very lightweight HTTP server: + ```bash + kubectl create deployment httpenv --image=jpetazzo/httpenv + ``` + +- Scale it to 3 replicas: + ```bash + kubectl scale deployment httpenv --replicas=3 + ``` + +] + +--- + +## Exposing our deployment + +- We'll create a default `ClusterIP` service + +.exercise[ + +- Expose the HTTP port of our server: + ```bash + kubectl expose deployment httpenv --port 8888 + ``` + +- Look up which IP address was allocated: + ```bash + kubectl get service + ``` + +] + +--- + +## Services are layer 4 constructs + +- You can assign IP addresses to services, but they are still *layer 4* + + (i.e. a service is not an IP address; it's an IP address + protocol + port) + +- This is caused by the current implementation of `kube-proxy` + + (it relies on mechanisms that don't support layer 3) + +- As a result: you *have to* indicate the port number for your service + +- Running services with arbitrary port (or port ranges) requires hacks + + (e.g. host networking mode) + +--- + +## Testing our service + +- We will now send a few HTTP requests to our pods + +.exercise[ + +- Let's obtain the IP address that was allocated for our service, *programmatically:* + ```bash + IP=$(kubectl get svc httpenv -o go-template --template '{{ .spec.clusterIP }}') + ``` + +- Send a few requests: + ```bash + curl http://$IP:8888/ + ``` + +- Too much output? Filter it with `jq`: + ```bash + curl -s http://$IP:8888/ | jq .HOSTNAME + ``` + +] + +-- + +Oh right, that doesn't work, its a `cluster-ip`. We need another way to access it. + +--- + +## port forwarding + +- You can forward a local port from your machine into a pod + +.exercise[ + +- Forward a port into your deployment: + ```bash + kubectl port-forward service/httpenv 8888:8888 + ``` + +- In a new window run curl a few times: + ```bash + curl localhost:8888 + curl localhost:8888 + curl localhost:8888 + ``` + +- Hit `ctrl-c` in the original window to terminate the port-forward +] + +-- + +The response was the same from each request. This is because `kubectl port-forward` forwards to a specific pod, not to the cluster-ip. 
+ +--- + +class: extra-details + +## If we don't need a clusterIP load balancer + +- Sometimes, we want to access our scaled services directly: + + - if we want to save a tiny little bit of latency (typically less than 1ms) + + - if we need to connect over arbitrary ports (instead of a few fixed ones) + + - if we need to communicate over another protocol than UDP or TCP + + - if we want to decide how to balance the requests client-side + + - ... + +- In that case, we can use a "headless service" + +--- + +class: extra-details + +## Headless services + +- A headless service is obtained by setting the `clusterIP` field to `None` + + (Either with `--cluster-ip=None`, or by providing a custom YAML) + +- As a result, the service doesn't have a virtual IP address + +- Since there is no virtual IP address, there is no load balancer either + +- CoreDNS will return the pods' IP addresses as multiple `A` records + +- This gives us an easy way to discover all the replicas for a deployment + +--- + +class: extra-details + +## Services and endpoints + +- A service has a number of "endpoints" + +- Each endpoint is a host + port where the service is available + +- The endpoints are maintained and updated automatically by Kubernetes + +.exercise[ + +- Check the endpoints that Kubernetes has associated with our `httpenv` service: + ```bash + kubectl describe service httpenv + ``` + +] + +In the output, there will be a line starting with `Endpoints:`. + +That line will list a bunch of addresses in `host:port` format. + +--- + +class: extra-details + +## Viewing endpoint details + +- When we have many endpoints, our display commands truncate the list + ```bash + kubectl get endpoints + ``` + +- If we want to see the full list, we can use one of the following commands: + ```bash + kubectl describe endpoints httpenv + kubectl get endpoints httpenv -o yaml + ``` + +- These commands will show us a list of IP addresses + +- These IP addresses should match the addresses of the corresponding pods: + ```bash + kubectl get pods -l app=httpenv -o wide + ``` + +--- + +class: extra-details + +## `endpoints` not `endpoint` + +- `endpoints` is the only resource that cannot be singular + +```bash +$ kubectl get endpoint +error: the server doesn't have a resource type "endpoint" +``` + +- This is because the type itself is plural (unlike every other resource) + +- There is no `endpoint` object: `type Endpoints struct` + +- The type doesn't represent a single endpoint, but a list of endpoints + +--- + +## Exposing services to the outside world + +- The default type (ClusterIP) only works for internal traffic + +- If we want to accept external traffic, we can use one of these: + + - NodePort (expose a service on a TCP port between 30000-32768) + + - LoadBalancer (provision a cloud load balancer for our service) + + - ExternalIP (use one node's external IP address) + + - Ingress (a special mechanism for HTTP services) + +*We'll see NodePorts and Ingresses more in detail later.* + +--- + +## Exposing services to the outside world + +.exercise[ + +- Set the service to be of type `Loadbalancer`: + ```bash + kubectl patch svc httpenv -p '{"spec": {"type": "LoadBalancer"}}' + ``` + +- Check for the IP of the loadbalancer: + ```bash + kubectl get svc httpenv + ``` + +- Test access via the loadbalancer: + ```bash + curl :8888 + ``` +] + +-- + +The `kubectl patch` command lets you patch a kubernetes resource to make minor changes like the above modification of the service type. 
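To see the ClusterIP load balancing in action, the requests have to originate from inside the cluster (for example, from a short-lived pod). Here is a sketch; the `curlimages/curl` image is just an example, any image that ships `curl` will do:

```bash
# run a throwaway pod that curls the service by name (CoreDNS resolves `httpenv`)
kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
  -s http://httpenv:8888/
```

Run it a few times: the `HOSTNAME` field should change, because the service spreads requests across the three pods.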
+ +--- + +## Cleanup + +.exercise[ + +- Delete the service + ```bash + kubectl delete svc httpenv + ``` + +- Delete the deployment + ```bash + kubectl delete deployment httpenv + ``` + +] diff --git a/slides/pks/kubectlget.md b/slides/pks/kubectlget.md new file mode 100644 index 000000000..65e8c4eca --- /dev/null +++ b/slides/pks/kubectlget.md @@ -0,0 +1,375 @@ +# First contact with `kubectl` + +- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes + +- It is a rich CLI tool around the Kubernetes API + + (Everything you can do with `kubectl`, you can do directly with the API) + +- On our machines, there is a `~/.kube/config` file with: + + - the Kubernetes API address + + - the path to our TLS certificates used to authenticate + +- You can also use the `--kubeconfig` flag to pass a config file + +- Or directly `--server`, `--user`, etc. + +- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"... + +--- + +## `kubectl get` + +- Let's look at our `Node` resources with `kubectl get`! + +.exercise[ + +- Look at the composition of our cluster: + ```bash + kubectl get node + ``` + +- These commands are equivalent: + ```bash + kubectl get no + kubectl get node + kubectl get nodes + ``` + +] + +--- + +## Obtaining machine-readable output + +- `kubectl get` can output JSON, YAML, or be directly formatted + +.exercise[ + +- Give us more info about the nodes: + ```bash + kubectl get nodes -o wide + ``` + +- Let's have some YAML: + ```bash + kubectl get no -o yaml + ``` + See that `kind: List` at the end? It's the type of our result! + +] + +--- + +## (Ab)using `kubectl` and `jq` + +- It's super easy to build custom reports + +.exercise[ + +- Show the capacity of all our nodes as a stream of JSON objects: + ```bash + kubectl get nodes -o json | + jq ".items[] | {name:.metadata.name} + .status.capacity" + ``` + +] + +--- + +class: extra-details + +## Exploring types and definitions + +- We can list all available resource types by running `kubectl api-resources` +
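If you prefer, the same change can be written as a JSON patch (RFC 6902 style), which some people find easier to review:

```bash
# equivalent to the strategic-merge patch used above
kubectl patch svc httpenv --type=json \
  -p '[{"op": "replace", "path": "/spec/type", "value": "LoadBalancer"}]'
```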
+ (In Kubernetes 1.10 and prior, this command used to be `kubectl get`) + +- We can view the definition for a resource type with: + ```bash + kubectl explain type + ``` + +- We can view the definition of a field in a resource, for instance: + ```bash + kubectl explain node.spec + ``` + +- Or get the full definition of all fields and sub-fields: + ```bash + kubectl explain node --recursive + ``` + +--- + +class: extra-details + +## Introspection vs. documentation + +- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference) + +- The API documentation is usually easier to read, but: + + - it won't show custom types (like Custom Resource Definitions) + + - we need to make sure that we look at the correct version + +- `kubectl api-resources` and `kubectl explain` perform *introspection* + + (they communicate with the API server and obtain the exact type definitions) + +--- + +## Type names + +- The most common resource names have three forms: + + - singular (e.g. `node`, `service`, `deployment`) + + - plural (e.g. `nodes`, `services`, `deployments`) + + - short (e.g. `no`, `svc`, `deploy`) + +- Some resources do not have a short name + +- `Endpoints` only have a plural form + + (because even a single `Endpoints` resource is actually a list of endpoints) + +--- + +## Viewing details + +- We can use `kubectl get -o yaml` to see all available details + +- However, YAML output is often simultaneously too much and not enough + +- For instance, `kubectl get node node1 -o yaml` is: + + - too much information (e.g.: list of images available on this node) + + - not enough information (e.g.: doesn't show pods running on this node) + + - difficult to read for a human operator + +- For a comprehensive overview, we can use `kubectl describe` instead + +--- + +## `kubectl describe` + +- `kubectl describe` needs a resource type and (optionally) a resource name + +- It is possible to provide a resource name *prefix* + + (all matching objects will be displayed) + +- `kubectl describe` will retrieve some extra information about the resource + +.exercise[ + +- Look at the information available for `node1` with one of the following commands: + ```bash + kubectl describe node/node1 + kubectl describe node node1 + ``` + +] + +(We should notice a bunch of control plane pods.) + +--- + +## Services + +- A *service* is a stable endpoint to connect to "something" + + (In the initial proposal, they were called "portals") + +.exercise[ + +- List the services on our cluster with one of these commands: + ```bash + kubectl get services + kubectl get svc + ``` + +] + +-- + +There should be no services. This is because you're not running anything yet. But there are some services running in other namespaces. + +--- + +## Services + +- A *service* is a stable endpoint to connect to "something" + + (In the initial proposal, they were called "portals") + +.exercise[ + +- List the services on our cluster with one of these commands: + ```bash + kubectl get services --all-namespaces + kubectl get svc --all-namespaces + ``` + +] + +-- + +There's a bunch of services already running that are used in the operations of the Kubernetes cluster. 
+ +--- + +## ClusterIP services + +- A `ClusterIP` service is internal, available from the cluster only + +- This is useful for introspection from within containers + +.exercise[ + +- Try to connect to the API: + ```bash + curl -k https://`10.100.200.1` + ``` + + - `-k` is used to skip certificate verification + + - Make sure to replace 10.100.200.1 with the CLUSTER-IP for the `kubernetes` service shown by `kubectl get svc` + +] + +-- + +The Cluster IP is only accessible from inside the cluster. We'll explore other ways to expose a service later. + +--- + +## Listing running containers + +- Containers are manipulated through *pods* + +- A pod is a group of containers: + + - running together (on the same node) + + - sharing resources (RAM, CPU; but also network, volumes) + +.exercise[ + +- List pods on our cluster: + ```bash + kubectl get pods + ``` + +] + +-- + +*Where are the pods that we saw just a moment earlier?!?* + +--- + +## Namespaces + +- Namespaces allow us to segregate resources + +.exercise[ + +- List the namespaces on our cluster with one of these commands: + ```bash + kubectl get namespaces + kubectl get namespace + kubectl get ns + ``` + +] + +-- + +*You know what ... This `kube-system` thing looks suspicious.* + +*In fact, I'm pretty sure it showed up earlier, when we did:* + +`kubectl describe node node1` + +--- + +## Accessing namespaces + +- By default, `kubectl` uses the `default` namespace + +- We can see resources in all namespaces with `--all-namespaces` + +.exercise[ + +- List the pods in all namespaces: + ```bash + kubectl get pods --all-namespaces + ``` + +- Since Kubernetes 1.14, we can also use `-A` as a shorter version: + ```bash + kubectl get pods -A + ``` + +] + +*Here are our system pods!* + +--- + +## What are all these control plane pods? + +- `kube-apiserver` is the API server + +- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/)) + + +- the `READY` column indicates the number of containers in each pod + + (1 for most pods, but `coredns` has 3, for instance) + +--- + +## Scoping another namespace + +- We can also look at a different namespace (other than `default`) + +.exercise[ + +- List only the pods in the `kube-system` namespace: + ```bash + kubectl get pods --namespace=kube-system + kubectl get pods -n kube-system + ``` + +] + +--- + +## Namespaces and other `kubectl` commands + +- We can use `-n`/`--namespace` with almost every `kubectl` command + +- Example: + + - `kubectl create --namespace=X` to create something in namespace X + +- We can use `-A`/`--all-namespaces` with most commands that manipulate multiple objects + +- Examples: + + - `kubectl delete` can delete resources across multiple namespaces + + - `kubectl label` can add/remove/update labels across multiple namespaces + +-- + +**These commands will not work for you, as you are restricted by Role Based Authentication to only have write access inside your own namespace.** \ No newline at end of file diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md new file mode 100644 index 000000000..1726024d9 --- /dev/null +++ b/slides/pks/logistics.md @@ -0,0 +1,21 @@ +## Intros + +- This slide should be customized by the tutorial instructor(s). + +- Hello! 
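If you have `jq` installed (it is in the list of optional tools), you can turn that output into a quick report of service names and their cluster IPs; a small sketch:

```bash
# one line per service: namespace/name followed by its ClusterIP
kubectl get services --all-namespaces -o json |
  jq -r '.items[] | "\(.metadata.namespace)/\(.metadata.name) \(.spec.clusterIP)"'
```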
We are: + + - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), Pivotal Software) + - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), Pivotal Software) + + +- The workshop will run from ... + +- There will be a lunch break at ... + + (And coffee breaks!) + +- Feel free to interrupt for questions at any time + +- *Especially when you see full screen container pictures!* + +- Live feedback, questions, help: @@CHAT@@ diff --git a/slides/pks/octant.md b/slides/pks/octant.md new file mode 100644 index 000000000..2f854fb53 --- /dev/null +++ b/slides/pks/octant.md @@ -0,0 +1,14 @@ +# Octant + +Octant is an open source tool from VMWare which is designed to be a Kubernetes workload visualization tool that runs locally and uses your Kubeconfig to connect to the Kubernetes cluster. + +Octant only ever performs list and read style requests and does not create/modify/delete resources. This makes it a much safer tool to use than the Kubernetes Dashboard. + +.exercise[ + +- Run octant and browse through your resources: + ```bash + octant + ``` + +] \ No newline at end of file diff --git a/slides/pks/ourapponkube.md b/slides/pks/ourapponkube.md new file mode 100644 index 000000000..8cf858b5b --- /dev/null +++ b/slides/pks/ourapponkube.md @@ -0,0 +1,139 @@ +# Running our application on Kubernetes + +- We can now deploy our code (as well as a redis instance) + +.exercise[ + +- Deploy `redis`: + ```bash + kubectl create deployment redis --image=redis + ``` + +- Deploy everything else: + ```bash + for SERVICE in hasher rng webui worker; do + kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG + done + ``` + +] + +--- + +## Is this working? + +- After waiting for the deployment to complete, let's look at the logs! + + (Hint: use `kubectl get deploy -w` to watch deployment events) + +.exercise[ + + + +- Look at some logs: + ```bash + kubectl logs deploy/rng + kubectl logs deploy/worker + ``` + +] + +-- + +🤔 `rng` is fine ... But not `worker`. + +-- + +💡 Oh right! We forgot to `expose`. + +--- + +## Connecting containers together + +- Three deployments need to be reachable by others: `hasher`, `redis`, `rng` + +- `worker` doesn't need to be exposed + +- `webui` will be dealt with later + +.exercise[ + +- Expose each deployment, specifying the right port: + ```bash + kubectl expose deployment redis --port 6379 + kubectl expose deployment rng --port 80 + kubectl expose deployment hasher --port 80 + ``` + +] + +--- + +## Is this working yet? + +- The `worker` has an infinite loop, that retries 10 seconds after an error + +.exercise[ + +- Stream the worker's logs: + ```bash + kubectl logs deploy/worker --follow + ``` + + (Give it about 10 seconds to recover) + + + +] + +-- + +We should now see the `worker`, well, working happily. + +--- + +## Exposing services for external access + +- Now we would like to access the Web UI + +- We will use `kubectl port-forward` because we don't want the whole world to see it. + +.exercise[ + +- Create a port forward for the Web UI: + ```bash + kubectl port-forward deploy/webui 8888:80 + ``` +- In a new terminal check you can access it: + ```bash + curl localhost:8888 + ``` +] + +-- + +The output `Found. Redirecting to /index.html` tells us the port forward worked. + +--- + +## Accessing the web UI + +- We can now access the web UI from the port-forward. But nobody else can. 
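Note: the port forward from the previous slide must still be running for the next step. If it was interrupted, it can simply be started again (optionally in the background, so the same terminal stays free; this is purely a convenience):

```bash
# re-establish the tunnel to the webui deployment on local port 8888
kubectl port-forward deploy/webui 8888:80 &
```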
+ +.exercise[ + +- Open the web UI in your browser (http://localhost:8888/) + + + +] + +-- + +*Alright, we're back to where we started, when we were running on a single node!* diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md new file mode 100644 index 000000000..9607699da --- /dev/null +++ b/slides/pks/prereqs.md @@ -0,0 +1,115 @@ +# Pre-requirements + +- Be comfortable with the UNIX command line + + - navigating directories + + - editing files + + - a little bit of bash-fu (environment variables, loops) + +- Some Docker knowledge + + - `docker run`, `docker ps`, `docker build` + + - ideally, you know how to write a Dockerfile and build it +
+ (even if it's a `FROM` line and a couple of `RUN` commands) + +- It's totally OK if you are not a Docker expert! + +--- + +## software pre-requirements + +- You'll need the following software installed on your local laptop: + +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +* [helm](https://helm.sh/docs/using_helm/#installing-helm) + +- Bonus tools + +* [octant](https://github.com/vmware/octant#installation) +* [stern]() +* [jq]() + +--- + +class: title + +*Tell me and I forget.* +
+*Teach me and I remember.* +
+*Involve me and I learn.* + +Misattributed to Benjamin Franklin + +[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/) + +--- + +## Hands-on sections + +- The whole workshop is hands-on + +- You are invited to reproduce all the demos + +- You will be using conference wifi and a shared kubernetes cluster. Please be kind to both. + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + +- Join the chat room: @@CHAT@@ + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person + +## shared cluster dedicated to this workshop + +- A large Pivotal Container Service (PKS) cluster deployed to Google Cloud. + +- It remain up for the duration of the workshop + +- You should have a little card with login+password+URL + +- Logging into this URL will give you a downloadable kubeconfig file. + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - kubectl + + - helm diff --git a/slides/pks/scalingdockercoins.md b/slides/pks/scalingdockercoins.md new file mode 100644 index 000000000..527058c8e --- /dev/null +++ b/slides/pks/scalingdockercoins.md @@ -0,0 +1,241 @@ +# Scaling our demo app + +- Our ultimate goal is to get more DockerCoins + + (i.e. increase the number of loops per second shown on the web UI) + +- Let's look at the architecture again: + + ![DockerCoins architecture](images/dockercoins-diagram.svg) + +- The loop is done in the worker; + perhaps we could try adding more workers? + +--- + +## Adding another worker + +- All we have to do is scale the `worker` Deployment + +.exercise[ + +- Open two new terminals to check what's going on with pods and deployments: + ```bash + kubectl get pods -w + kubectl get deployments -w + ``` + + + +- Now, create more `worker` replicas: + ```bash + kubectl scale deployment worker --replicas=2 + ``` + +] + +After a few seconds, the graph in the web UI should show up. + +--- + +## Adding more workers + +- If 2 workers give us 2x speed, what about 3 workers? + +.exercise[ + +- Scale the `worker` Deployment further: + ```bash + kubectl scale deployment worker --replicas=3 + ``` + +] + +The graph in the web UI should go up again. + +(This is looking great! We're gonna be RICH!) + +--- + +## Adding even more workers + +- Let's see if 10 workers give us 10x speed! + +.exercise[ + +- Scale the `worker` Deployment to a bigger number: + ```bash + kubectl scale deployment worker --replicas=10 + ``` + +] + +-- + +The graph will peak at 10 hashes/second. + +(We can add as many workers as we want: we will never go past 10 hashes/second.) + +--- + +class: extra-details + +## Didn't we briefly exceed 10 hashes/second? + +- It may *look like it*, because the web UI shows instant speed + +- The instant speed can briefly exceed 10 hashes/second + +- The average speed cannot + +- The instant speed can be biased because of how it's computed + +--- + +class: extra-details + +## Why instant speed is misleading + +- The instant speed is computed client-side by the web UI + +- The web UI checks the hash counter once per second +
+ (and does a classic (h2-h1)/(t2-t1) speed computation) + +- The counter is updated once per second by the workers + +- These timings are not exact +
+ (e.g. the web UI check interval is client-side JavaScript) + +- Sometimes, between two web UI counter measurements, +
+ the workers are able to update the counter *twice* + +- During that cycle, the instant speed will appear to be much bigger +
+ (but it will be compensated by lower instant speed before and after) + +--- + +## Why are we stuck at 10 hashes per second? + +- If this was high-quality, production code, we would have instrumentation + + (Datadog, Honeycomb, New Relic, statsd, Sumologic, ...) + +- It's not! + +- Perhaps we could benchmark our web services? + + (with tools like `ab`, or even simpler, `httping`) + +--- + +## Benchmarking our web services + +- We want to check `hasher` and `rng` + +- We are going to use `httping` + +- It's just like `ping`, but using HTTP `GET` requests + + (it measures how long it takes to perform one `GET` request) + +- It's used like this: + ``` + httping [-c count] http://host:port/path + ``` + +- Or even simpler: + ``` + httping ip.ad.dr.ess + ``` + +- We will use `httping` on the ClusterIP addresses of our services + +--- + +## Running a debug pod + +We don't have direct access to ClusterIP services, nor do we want to run a bunch of port-forwards. Instead we can run a Pod containing `httping` and then use `kubectl exec` to perform our debugging. + +.excercise[ + +- Run a debug pod + ```bash + kubectl run debug --image=paulczar/debug \ + --restart=Never -- sleep 6000 + ``` + +] + +-- + +This will run our debug pod which contains tools like `httping` that will self-destruct after 6000 seconds. + +--- + +### Executing a command in a running pod + +- You may have need to occasionally run a command inside a pod. Rather than trying to run `SSH` inside a container you can use the `kubectl exec` command. + +.excercise[ + + - Run curl inside your debug pod: + ```bash + kubectl exec debug -- curl -s https://google.com + ``` +] + +-- + +```html + +301 Moved +

301 Moved

+The document has moved +here. + +``` + +--- + +## Service Discovery + +- Each of our services has a Cluster IP which we could get using `kubectl get services` + +- Or do it programmatically, like so: + ```bash + HASHER=$(kubectl get svc hasher -o go-template={{.spec.clusterIP}}) + RNG=$(kubectl get svc rng -o go-template={{.spec.clusterIP}}) + ``` + +- However Kubernetes has an in-cluster DNS server which means if you're inside the cluster you can simple use the service name as an endpoint. + +--- + +## Checking `hasher` and `rng` response times + +.exercise[ + +- Check the response times for both services: + ```bash + kubectl exec debug -- httping -c 3 hasher + kubectl exec debug -- httping -c 3 rng + ``` + +] + +-- + +- `hasher` is fine (it should take a few milliseconds to reply) + +- `rng` is not (it should take about 700 milliseconds if there are 10 workers) + +- Something is wrong with `rng`, but ... what? diff --git a/slides/pks/setup-k8s.md b/slides/pks/setup-k8s.md new file mode 100644 index 000000000..8a6f5b875 --- /dev/null +++ b/slides/pks/setup-k8s.md @@ -0,0 +1,108 @@ +# Setting up Kubernetes + +How did we set up these Kubernetes clusters that we're using? + +-- + +- We used Pivotal Container Service (PKS) a multicloud Kubernetes broker. + +- But first we Created a GKE Kubernetes cluster + - We installed the Google Cloud Operator on GKE + - We installed PKS using the GCP Operator + - We installed this Kubernetes cluster using PKS + +--- + +# Setting up Kubernetes + +- How can I set up a basic Kubernetes lab at home? + +-- + + + +- Run `kubeadm` on freshly installed VM instances running Ubuntu LTS + + 1. Install Docker + + 2. Install Kubernetes packages + + 3. Run `kubeadm init` on the first node (it deploys the control plane on that node) + + 4. Set up Weave (the overlay network) +
+ (that step is just one `kubectl apply` command; discussed later) + + 5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`) + + 6. Copy the configuration file generated by `kubeadm init` + +- Check the [prepare VMs README](https://@@GITREPO@@/blob/master/prepare-vms/README.md) for more details + +--- + +## `kubeadm` drawbacks + +- Doesn't set up Docker or any other container engine + +- Doesn't set up the overlay network + +- Doesn't set up multi-master (no high availability) + +-- + + (At least ... not yet! Though it's [experimental in 1.12](https://kubernetes.io/docs/setup/independent/high-availability/).) + +-- + +- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme + +--- + +## Other deployment options + +- [AKS](https://azure.microsoft.com/services/kubernetes-service/): + managed Kubernetes on Azure + +- [GKE](https://cloud.google.com/kubernetes-engine/): + managed Kubernetes on Google Cloud + +- [EKS](https://aws.amazon.com/eks/), + [eksctl](https://eksctl.io/): + managed Kubernetes on AWS + +- [kops](https://github.com/kubernetes/kops): + customizable deployments on AWS, Digital Ocean, GCE (beta), vSphere (alpha) + +- [minikube](https://kubernetes.io/docs/setup/minikube/), + [kubespawn](https://github.com/kinvolk/kube-spawn), + [Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/): + for local development + +- [kubicorn](https://github.com/kubicorn/kubicorn), + the [Cluster API](https://blogs.vmware.com/cloudnative/2019/03/14/what-and-why-of-cluster-api/): + deploy your clusters declaratively, "the Kubernetes way" + +--- + +## Even more deployment options + +- If you like Ansible: + [kubespray](https://github.com/kubernetes-incubator/kubespray) + +- If you like Terraform: + [typhoon](https://github.com/poseidon/typhoon) + +- If you like Terraform and Puppet: + [tarmak](https://github.com/jetstack/tarmak) + +- You can also learn how to install every component manually, with + the excellent tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) + + *Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.* + +- There are also many commercial options available! + +- For a longer list, check the Kubernetes documentation: +
+ it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes. From dffc054ff7a2bcf14eb5ff3977036ed1e8a6960c Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Wed, 28 Aug 2019 10:34:52 -0500 Subject: [PATCH 07/14] scripts for preparing pks cluster --- prepare-pks/README.md | 36 ++++++++++++++++++ prepare-pks/users/create.sh | 10 +++++ prepare-pks/users/delete.sh | 10 +++++ prepare-pks/users/helm.sh | 31 +++++++++++++++ prepare-pks/users/random-users.sh | 11 ++++++ prepare-pks/users/user-role-etc.yaml | 57 ++++++++++++++++++++++++++++ prepare-pks/users/users.txt | 2 + 7 files changed, 157 insertions(+) create mode 100644 prepare-pks/README.md create mode 100755 prepare-pks/users/create.sh create mode 100755 prepare-pks/users/delete.sh create mode 100755 prepare-pks/users/helm.sh create mode 100755 prepare-pks/users/random-users.sh create mode 100644 prepare-pks/users/user-role-etc.yaml create mode 100644 prepare-pks/users/users.txt diff --git a/prepare-pks/README.md b/prepare-pks/README.md new file mode 100644 index 000000000..688c27945 --- /dev/null +++ b/prepare-pks/README.md @@ -0,0 +1,36 @@ +# Instructions for preparing a PKS Kubernetes Cluster + +## pre-reqs + +* ingress controller (nginx or nsxt) +* gangway (or similar for kubeconfig files) + +## Create users + +This example will create 50 random users in UAAC and corresponding Kubernetes users and rbac. + +```bash +$ cd users +$ ./random-users.sh 50 +... +... +$ ./create.sh +... +... +``` + +This will install helm tiller for each: + +```bash +$ ./helm.sh +... +... +``` + +This will clean up: + +```bash +$ ./delete.sh +... +... +``` diff --git a/prepare-pks/users/create.sh b/prepare-pks/users/create.sh new file mode 100755 index 000000000..98523fe61 --- /dev/null +++ b/prepare-pks/users/create.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + echo "--> Adding user $col1 with password $col2" + echo "====> UAAC" + uaac user add $col1 --emails $col1@pks -p $col2 + echo "====> Kubernetes" + cat user-role-etc.yaml | sed "s/__username__/$col1/" | kubectl apply -f - +done < users.txt diff --git a/prepare-pks/users/delete.sh b/prepare-pks/users/delete.sh new file mode 100755 index 000000000..6fff38f54 --- /dev/null +++ b/prepare-pks/users/delete.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + echo "--> Deleting user $col1 with password $col2" + echo "====> UAAC" + uaac user delete $col1 + echo "====> Kubernetes" + cat user-role-etc.yaml | sed "s/__username__/$col1/" | kubectl delete -f - +done < users.txt \ No newline at end of file diff --git a/prepare-pks/users/helm.sh b/prepare-pks/users/helm.sh new file mode 100755 index 000000000..cdda4ede1 --- /dev/null +++ b/prepare-pks/users/helm.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +while IFS=, read -r col1 col2 +do + +kubectl -n $col1 create serviceaccount tiller + +kubectl -n $col1 create role tiller --verb '*' --resource '*' + +kubectl -n $col1 create rolebinding tiller --role tiller --serviceaccount ${col1}:tiller + +kubectl create clusterrole ns-tiller --verb 'get,list' --resource namespaces + +kubectl create clusterrolebinding tiller --clusterrole ns-tiller --serviceaccount ${col1}:tiller + +helm init --service-account=tiller --tiller-namespace=$col1 + +kubectl -n $col1 delete service tiller-deploy + +kubectl -n $col1 patch deployment tiller-deploy --patch ' +spec: + template: + spec: + containers: + - name: tiller + ports: [] + command: ["/tiller"] + args: 
["--listen=localhost:44134"] +' + +done < users.txt diff --git a/prepare-pks/users/random-users.sh b/prepare-pks/users/random-users.sh new file mode 100755 index 000000000..e299afc19 --- /dev/null +++ b/prepare-pks/users/random-users.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [[ -z $1 ]]; then + echo "Usage: ./random-names.sh 55" + exit 1 +fi + +for i in {1..50}; do + PW=`cat /dev/urandom | tr -dc 'a-zA-Z1-9' | fold -w 10 | head -n 1` + echo "user$i,$PW" +done diff --git a/prepare-pks/users/user-role-etc.yaml b/prepare-pks/users/user-role-etc.yaml new file mode 100644 index 000000000..0b5dc75cc --- /dev/null +++ b/prepare-pks/users/user-role-etc.yaml @@ -0,0 +1,57 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: __username__ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbac-user-namespace +rules: +- apiGroups: ["", "extensions", "apps", "batch", "autoscaling","networking.k8s.io"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbac-user-cluster +rules: +- apiGroups: ["", "extensions", "apps"] + resources: ["*"] + verbs: ["list"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["list","get"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["*"] + verbs: ["list"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __username__ + namespace: __username__ +subjects: +- kind: User + name: __username__ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: rbac-user-namespace + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __username__ + namespace: __username__ +subjects: +- kind: User + name: __username__ + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: rbac-user-cluster + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/prepare-pks/users/users.txt b/prepare-pks/users/users.txt new file mode 100644 index 000000000..450cdc67d --- /dev/null +++ b/prepare-pks/users/users.txt @@ -0,0 +1,2 @@ +user1,user1-password +user2,user2-password From 88dd40e32d9bc966e0966d780a385a1020ce03ff Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Thu, 29 Aug 2019 16:45:57 -0500 Subject: [PATCH 08/14] helm for pks --- slides/kube-fullday-namespaced.yml | 1 + slides/pks/helm-wordpress.md | 109 +++++++++++++++++++++++++++++ slides/pks/helm.md | 17 ++--- slides/pks/wp/values.yaml | 18 +++++ 4 files changed, 137 insertions(+), 8 deletions(-) create mode 100644 slides/pks/helm-wordpress.md create mode 100644 slides/pks/wp/values.yaml diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index f71db8513..d7fa9fe62 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -85,6 +85,7 @@ chapters: #- k8s/owners-and-dependents.md #- k8s/gitworkflows.md - pks/helm.md + - pks/helm-wordpress.md - - k8s/whatsnext.md - k8s/links.md diff --git a/slides/pks/helm-wordpress.md b/slides/pks/helm-wordpress.md new file mode 100644 index 000000000..882a68da3 --- /dev/null +++ b/slides/pks/helm-wordpress.md @@ -0,0 +1,109 @@ +## Why wordpress its 2019?!?! + +I know ... funny right :) + +--- + +## Helm install notes + +- You'll notice a helpful message after running `helm install` that looks something like this: + +``` +NOTES: +1. 
Get the WordPress URL: + + echo "WordPress URL: http://127.0.0.1:8080/" + echo "WordPress Admin URL: http://127.0.0.1:8080/admin" + kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 + +2. Login with the following credentials to see your blog + + echo Username: user + echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) +``` + +-- + +Helm charts generally have a `NOTES.txt` template that is rendered out and displayed after helm commands are run. Pretty neat. + +--- + +## What did helm install ? + +- Run `kubectl get all` to check what resources helm installed + +.exercise[ + - Run `kubectl get all`: + ```bash + kubectl get all + ``` + +] +--- + +## What did helm install ? + +``` +NAME READY STATUS RESTARTS AGE +pod/wp-mariadb-0 1/1 Running 0 11m +pod/wp-wordpress-6cb9cfc94-chbr6 1/1 Running 0 11m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/wp-mariadb ClusterIP 10.100.200.87 3306/TCP 11m +service/wp-wordpress ClusterIP 10.100.200.131 80/TCP,443/TCP 11m + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/wp-wordpress 1/1 1 1 11m + +NAME DESIRED CURRENT READY AGE +replicaset.apps/tiller-deploy-6487f7bfd8 1 1 1 2d6h +replicaset.apps/tiller-deploy-75ccf68856 0 0 0 2d6h +replicaset.apps/wp-wordpress-6cb9cfc94 1 1 1 11m + +NAME READY AGE +statefulset.apps/wp-mariadb 1/1 11m + +``` + +--- + +## Check if wordpress is working + +- Using the notes provided from helm check you can access your wordpress and login as `user` + +.exercise[ + - run the commands provided by the helm summary: + ```bash + echo Username: user + echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) + + kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 + ``` +] + +-- + +Yay? you have a 2003 era blog + +--- + +## Helm Chart Values + +Settings values on the command line is okay for a demonstration, but we should really be creating a `~/workshop/values.yaml` file for our chart. Let's do that now. + +> the values file is a bit long to copy/paste from here, so lets wget it. + +.exercise[ + - Download the values.yaml file and edit it, changing the URL prefix to be `-wp`: + ```bash + wget -O ~/workshop/values.yaml \ + https://raw.githubusercontent.com/paulczar/container.training/pks/slides/pks/wp/values.yaml + + vim ~/workshop/values.yaml + + helm upgrade wp stable/wordpress -f ~/workshop/values.yaml + + ``` +] + +--- \ No newline at end of file diff --git a/slides/pks/helm.md b/slides/pks/helm.md index 6cab8141e..2ad003e5f 100644 --- a/slides/pks/helm.md +++ b/slides/pks/helm.md @@ -154,6 +154,7 @@ fine for personal and development clusters.) - Add the stable repo ```bash helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + helm repo update ``` ] @@ -165,16 +166,16 @@ fine for personal and development clusters.) - Most charts require persistent volumes to store data -- We need to relax these requirements a bit +- We can relax these requirements a bit .exercise[ -- Install the Prometheus metrics collector on our cluster: +- Install on our cluster: ```bash - helm install stable/prometheus \ - prometheus \ - --set server.service.type=ClusterIP \ - --set server.persistentVolume.enabled=false + helm install wp stable/wordpress \ + --set service.type=ClusterIP \ + --set persistence.enabled=false \ + --set mariadb.master.persistence.enabled=false ``` ] @@ -189,9 +190,9 @@ Where do these `--set` options come from? 
.exercise[ -- See the metadata and all available options for `stable/prometheus`: +- See the metadata and all available options for `stable/wordpress`: ```bash - helm inspect stable/prometheus + helm inspect stable/wordpress ``` ] diff --git a/slides/pks/wp/values.yaml b/slides/pks/wp/values.yaml new file mode 100644 index 000000000..cbc37988b --- /dev/null +++ b/slides/pks/wp/values.yaml @@ -0,0 +1,18 @@ +service: + type: ClusterIP +persistence: + enabled: false +mariadb: + master: + persistence: + enabled: false +ingress: + enabled: true + certManager: true + hosts: + - name: user1-wp.ingress.workshop.paulczar.wtf + path: / + tls: + - hosts: + - user1-wp.ingress.workshop.paulczar.wtf + secretName: wordpress-tls \ No newline at end of file From cef9ae2ddff90fa4575765602246042ced4277ab Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 3 Sep 2019 16:10:09 -0500 Subject: [PATCH 09/14] helm prereqs --- slides/pks/helm.md | 2 +- slides/pks/prereqs.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/slides/pks/helm.md b/slides/pks/helm.md index 2ad003e5f..51ab4e951 100644 --- a/slides/pks/helm.md +++ b/slides/pks/helm.md @@ -177,9 +177,9 @@ fine for personal and development clusters.) --set persistence.enabled=false \ --set mariadb.master.persistence.enabled=false ``` - ] + Where do these `--set` options come from? --- diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md index 9607699da..deff59632 100644 --- a/slides/pks/prereqs.md +++ b/slides/pks/prereqs.md @@ -30,8 +30,8 @@ - Bonus tools * [octant](https://github.com/vmware/octant#installation) -* [stern]() -* [jq]() +* [stern](https://github.com/wercker/stern/releases/tag/1.11.0) +* [jq](https://stedolan.github.io/jq/download/) --- From e1c1db913054f6affcf635a918427e26b94aa675 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 11 Feb 2020 12:30:18 -0600 Subject: [PATCH 10/14] update to pks docs --- slides/kube-fullday-namespaced.yml | 9 ++++----- slides/pks/connecting.md | 2 +- slides/pks/logistics.md | 10 ---------- slides/pks/prereqs.md | 2 +- slides/pks/title.md | 23 +++++++++++++++++++++++ 5 files changed, 29 insertions(+), 17 deletions(-) create mode 100644 slides/pks/title.md diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index d7fa9fe62..7570699af 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -1,20 +1,19 @@ title: | - Deploying and Scaling Microservices - with Kubernetes + Config Management Camp - Kubernetes Workshop #chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" #chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" chat: "In person!" 
-gitrepo: github.com/jpetazzo/container.training +gitrepo: github.com/paulczar/container.training -slides: http://container.training/ +slides: https://k8s.camp/cfgcamp/ exclude: - self-paced chapters: -- shared/title.md +- pks/title.md - pks/logistics.md - k8s/intro.md - shared/about-slides.md diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md index 89d32f799..1ee08026d 100644 --- a/slides/pks/connecting.md +++ b/slides/pks/connecting.md @@ -4,7 +4,7 @@ class: in-person .exercise[ -- Log into https://workshop.paulczar.wtf with your provided credentials +- Log into https://gangway.workshop.paulczar.wtf with your provided credentials (sorry about the [self-signed cert](https://gist.githubusercontent.com/paulczar/6e3f48a03e544627952aaa399a29a4af/raw/9e530371d8929ab573a205238dd0f2c718edc64c/ca.cert)) - Follow the instructions on the auth portal to set up a `kubeconfig` file. diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md index 1726024d9..e8071a57b 100644 --- a/slides/pks/logistics.md +++ b/slides/pks/logistics.md @@ -1,21 +1,11 @@ ## Intros -- This slide should be customized by the tutorial instructor(s). - - Hello! We are: - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), Pivotal Software) - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), Pivotal Software) -- The workshop will run from ... - -- There will be a lunch break at ... - - (And coffee breaks!) - - Feel free to interrupt for questions at any time - *Especially when you see full screen container pictures!* - -- Live feedback, questions, help: @@CHAT@@ diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md index deff59632..a969f44b5 100644 --- a/slides/pks/prereqs.md +++ b/slides/pks/prereqs.md @@ -25,7 +25,7 @@ - You'll need the following software installed on your local laptop: * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -* [helm](https://helm.sh/docs/using_helm/#installing-helm) +* [helm 3](https://helm.sh/docs/using_helm/#installing-helm) - Bonus tools diff --git a/slides/pks/title.md b/slides/pks/title.md new file mode 100644 index 000000000..568203a94 --- /dev/null +++ b/slides/pks/title.md @@ -0,0 +1,23 @@ +class: title, self-paced + +@@TITLE@@ + +.nav[*Self-paced version*] + +--- + +class: title, in-person + +@@TITLE@@

+ +.footnote[ +**Be kind to the WiFi!**
+ +*Don't use your hotspot.*
+*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*
+*Thank you!* + +**Slides: @@SLIDES@@**
+**Credentials: https://tinyurl.com/k8scamp**
+**Login: https://gangway.workshop.demo.paulczar.wtf** +] From 0cef956067ff38842948496a2e2a24f59630bbaa Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 21 Apr 2020 12:40:04 -0500 Subject: [PATCH 11/14] s1t edition Signed-off-by: Paul Czarkowski --- slides/kube-fullday-namespaced.yml | 3 +- slides/markmaker.py | 2 + slides/pks/connecting.md | 38 +- slides/pks/dashboard.md | 56 +-- slides/pks/helm-intro.md | 346 +++++++++++++++ slides/pks/httpenv-update.md | 387 +++++++++++++++++ slides/pks/kubectlexpose.md | 193 +++++---- slides/pks/kubectlget.md | 26 +- slides/pks/kubectlrun.md | 614 +++++++++++++++++++++++++++ slides/pks/kubercoins.md | 244 +++++++++++ slides/pks/logistics.md | 4 +- slides/pks/logs-centralized.md | 147 +++++++ slides/pks/octant.md | 7 +- slides/pks/prereqs.md | 5 +- slides/pks/sampleapp.md | 145 +++++++ slides/pks/security-kubectl-apply.md | 52 +++ slides/spring-one-tour.yml | 62 +++ 17 files changed, 2161 insertions(+), 170 deletions(-) create mode 100644 slides/pks/helm-intro.md create mode 100644 slides/pks/httpenv-update.md create mode 100644 slides/pks/kubectlrun.md create mode 100644 slides/pks/kubercoins.md create mode 100644 slides/pks/logs-centralized.md create mode 100644 slides/pks/sampleapp.md create mode 100644 slides/pks/security-kubectl-apply.md create mode 100644 slides/spring-one-tour.yml diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index 7570699af..552e2bf65 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -27,8 +27,7 @@ chapters: #- shared/composescale.md #- shared/hastyconclusions.md #- shared/composedown.md - - pks/concepts-k8s.md - - pks/kubectlget.md + - - k8s/kubectlrun.md - k8s/logs-cli.md diff --git a/slides/markmaker.py b/slides/markmaker.py index 07e780625..bc2737179 100755 --- a/slides/markmaker.py +++ b/slides/markmaker.py @@ -197,6 +197,8 @@ def processchapter(chapter, filename): else: repo = subprocess.check_output(["git", "config", "remote.origin.url"]).decode("ascii") repo = repo.strip().replace("git@github.com:", "https://github.com/") + regex = re.compile('\.git$') + repo = regex.sub("", repo) if "BRANCH" in os.environ: branch = os.environ["BRANCH"] else: diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md index 1ee08026d..810bf5d37 100644 --- a/slides/pks/connecting.md +++ b/slides/pks/connecting.md @@ -4,19 +4,16 @@ class: in-person .exercise[ -- Log into https://gangway.workshop.paulczar.wtf with your provided credentials (sorry about the [self-signed cert](https://gist.githubusercontent.com/paulczar/6e3f48a03e544627952aaa399a29a4af/raw/9e530371d8929ab573a205238dd0f2c718edc64c/ca.cert)) +- Log into https://gangway.workshop.paulczar.wtf with your provided credentials. - Follow the instructions on the auth portal to set up a `kubeconfig` file. -- Check that you can connect to the cluster with `kubectl get nodes`: +- Check that you can connect to the cluster with `kubectl cluster-info`: ```bash -$ kubectl get nodes -NAME STATUS ROLES AGE VERSION -vm-0f2b473c-5ae6-4af3-4e80-f0a068b03abe Ready 23h v1.14.5 -vm-25cfc8d6-88c0-45f6-4305-05e859af7f2c Ready 23h v1.14.5 -... -... +$ kubectl cluster-info +Kubernetes master is running at https://k8s.cluster1.demo.paulczar.wtf:8443 +CoreDNS is running at https://k8s.cluster1.demo.paulczar.wtf:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy ``` ] @@ -24,6 +21,31 @@ If anything goes wrong — ask for help! 
--- +## Role Based Authorization Control + +You are restricted to a subset of Kubernetes resources in your own namespace. Just like in a real world enterprise cluster. + + +.exercise[ + +1\. Can you create pods? + +``` +$ kubectl auth can-i create pods +``` + +2\. Can you delete namespaces? + +``` +$ kubectl auth can-i delete namespaces +``` +] +-- + +1. You can create pods in your own namespace. +2. You cannot delete namespaces. +--- + ## Doing or re-doing the workshop on your own? - Use something like diff --git a/slides/pks/dashboard.md b/slides/pks/dashboard.md index 18cca1d72..7df739227 100644 --- a/slides/pks/dashboard.md +++ b/slides/pks/dashboard.md @@ -1,6 +1,6 @@ # The Kubernetes dashboard -- Kubernetes resources can also be viewed with a web dashboard +- Kubernetes resources can be viewed with a web dashboard - That dashboard is usually exposed over HTTPS @@ -16,7 +16,7 @@ You know what, this is all a very bad idea. Let's not run the Kubernetes dashboard at all ... ever. -The following slides are informational. Do not run them. +The following slides are informational. **Do not run them**. --- @@ -111,56 +111,6 @@ The dashboard will then ask you which authentication you want to use.
check [this excellent post on Heptio's blog](https://blog.heptio.com/on-securing-the-kubernetes-dashboard-16b09b1b7aca) ---- - -# Security implications of `kubectl apply` - -- When we do `kubectl apply -f `, we create arbitrary resources - -- Resources can be evil; imagine a `deployment` that ... - --- - - - starts bitcoin miners on the whole cluster - --- - - - hides in a non-default namespace - --- - - - bind-mounts our nodes' filesystem - --- - - - inserts SSH keys in the root account (on the node) - -- - - encrypts our data and ransoms it - --- - - - ☠️☠️☠️ - ---- - -## `kubectl apply` is the new `curl | sh` - -- `curl | sh` is convenient - -- It's safe if you use HTTPS URLs from trusted sources - --- - -- `kubectl apply -f` is convenient - -- It's safe if you use HTTPS URLs from trusted sources - -- Example: the official setup instructions for most pod networks - --- - -- It introduces new failure modes - - (for instance, if you try to apply YAML from a link that's no longer valid) +- Or better yet, don't use the dashboard. Use Octant. \ No newline at end of file diff --git a/slides/pks/helm-intro.md b/slides/pks/helm-intro.md new file mode 100644 index 000000000..e1b0bc869 --- /dev/null +++ b/slides/pks/helm-intro.md @@ -0,0 +1,346 @@ +# Managing stacks with Helm + +- We created our first resources with `kubectl run`, `kubectl expose` ... + +- We have also created resources by loading YAML files with `kubectl apply -f` + +- For larger stacks, managing thousands of lines of YAML is unreasonable + +- These YAML bundles need to be customized with variable parameters + + (E.g.: number of replicas, image version to use ...) + +- It would be nice to have an organized, versioned collection of bundles + +- It would be nice to be able to upgrade/rollback these bundles carefully + +- [Helm](https://helm.sh/) is an open source project offering all these things! + +--- + +## Helm concepts + +- `helm` is a CLI tool + +- It is used to find, install, upgrade *charts* + +- A chart is an archive containing templatized YAML bundles + +- Charts are versioned + +- Charts can be stored on private or public repositories + +--- + +## Differences between charts and packages + +- A package (deb, rpm...) contains binaries, libraries, etc. + +- A chart contains YAML manifests + + (the binaries, libraries, etc. are in the images referenced by the chart) + +- On most distributions, a package can only be installed once + + (installing another version replaces the installed one) + +- A chart can be installed multiple times + +- Each installation is called a *release* + +- This allows to install e.g. 10 instances of MongoDB + + (with potentially different versions and configurations) + +--- + +class: extra-details + +## Wait a minute ... + +*But, on my Debian system, I have Python 2 **and** Python 3. +
+Also, I have multiple versions of the Postgres database engine!* + +Yes! + +But they have different package names: + +- `python2.7`, `python3.8` + +- `postgresql-10`, `postgresql-11` + +Good to know: the Postgres package in Debian includes +provisions to deploy multiple Postgres servers on the +same system, but it's an exception (and it's a lot of +work done by the package maintainer, not by the `dpkg` +or `apt` tools). + +--- + +## Helm 2 vs Helm 3 + +- Helm 3 was released [November 13, 2019](https://helm.sh/blog/helm-3-released/) + +- Charts remain compatible between Helm 2 and Helm 3 + +- The CLI is very similar (with minor changes to some commands) + +- The main difference is that Helm 2 uses `tiller`, a server-side component + +- Helm 3 doesn't use `tiller` at all, making it simpler (yay!) + +--- + +class: extra-details + +## With or without `tiller` + +- With Helm 3: + + - the `helm` CLI communicates directly with the Kubernetes API + + - it creates resources (deployments, services...) with our credentials + +- With Helm 2: + + - the `helm` CLI communicates with `tiller`, telling `tiller` what to do + + - `tiller` then communicates with the Kubernetes API, using its own credentials + +- This indirect model caused significant permissions headaches + + (`tiller` required very broad permissions to function) + +- `tiller` was removed in Helm 3 to simplify the security aspects + +--- + +## Installing Helm + +- If the `helm` CLI is not installed in your environment, install it + +.exercise[ + +- Check if `helm` is installed: + ```bash + helm version + ``` + +- If it's not installed (or its helm 2), run the following command: + ```bash + curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \ + | bash + ``` + +] + +--- + +## Charts and repositories + +- A *repository* (or repo in short) is a collection of charts + +- It's just a bunch of files + + (they can be hosted by a static HTTP server, or on a local directory) + +- We can add "repos" to Helm, giving them a nickname + +- The nickname is used when referring to charts on that repo + + (for instance, if we try to install `hello/world`, that + means the chart `world` on the repo `hello`; and that repo + `hello` might be something like https://blahblah.hello.io/charts/) + +--- + +## Managing repositories + +- Let's check what repositories we have, and add the `stable` repo + + (the `stable` repo contains a set of official-ish charts) + +.exercise[ + +- List our repos: + ```bash + helm repo list + ``` + +- Add the `stable` repo: + ```bash + helm repo add stable https://kubernetes-charts.storage.googleapis.com/ + ``` + +] + +Adding a repo can take a few seconds (it downloads the list of charts from the repo). + +It's OK to add a repo that already exists (it will merely update it). + +--- + +## Search available charts + +- We can search available charts with `helm search` + +- We need to specify where to search (only our repos, or Helm Hub) + +- Let's search for all charts mentioning tomcat! + +.exercise[ + +- Search for tomcat in the repo that we added earlier: + ```bash + helm search repo tomcat + ``` + +- Search for tomcat on the Helm Hub: + ```bash + helm search hub tomcat + ``` + +] + +[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server. 
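By default, the search only shows the latest version of each matching chart. If we want the full list (useful later, when pinning a specific chart version), `helm search repo` accepts a `--versions` flag:

```bash
# one line per chart version instead of one line per chart
helm search repo tomcat --versions
```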
+ +--- + +## Charts and releases + +- "Installing a chart" means creating a *release* + +- We need to name that release + + (or use the `--generate-name` to get Helm to generate one for us) + +.exercise[ + +- Install the tomcat chart that we found earlier: + ```bash + helm install java4ever stable/tomcat + ``` + +- List the releases: + ```bash + helm list + ``` + +] + +--- + +## Viewing resources of a release + +- This specific chart labels all its resources with a `release` label + +- We can use a selector to see these resources + +.exercise[ + +- List all the resources created by this release: + ```bash + kubectl get all --selector=release=java4ever + ``` + +] + +Note: this `release` label wasn't added automatically by Helm. +
+It is defined in that chart. In other words, not all charts will provide this label. + +--- + +## Configuring a release + +- By default, `stable/tomcat` creates a service of type `LoadBalancer` + +- We would like to change that to a `NodePort` + +- We could use `kubectl edit service java4ever-tomcat`, but ... + + ... our changes would get overwritten next time we update that chart! + +- Instead, we are going to *set a value* + +- Values are parameters that the chart can use to change its behavior + +- Values have default values + +- Each chart is free to define its own values and their defaults + +--- + +## Checking possible values + +- We can inspect a chart with `helm show` or `helm inspect` + +.exercise[ + +- Look at the README for tomcat: + ```bash + helm show readme stable/tomcat + ``` + +- Look at the values and their defaults: + ```bash + helm show values stable/tomcat + ``` + +] + +The `values` may or may not have useful comments. + +The `readme` may or may not have (accurate) explanations for the values. + +(If we're unlucky, there won't be any indication about how to use the values!) + +--- + +## Setting values + +- Values can be set when installing a chart, or when upgrading it + +- We are going to update `java4ever` to change the type of the service + +.exercise[ + +- Update `java4ever`: + ```bash + helm upgrade java4ever stable/tomcat --set service.type=NodePort + ``` + +] + +Note that we have to specify the chart that we use (`stable/tomcat`), +even if we just want to update some values. + +We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values. + +All unspecified values will take the default values defined in the chart. + +--- + +## Connecting to tomcat + +- Let's check the tomcat server that we just installed + +- Note: its readiness probe has a 60s delay + + (so it will take 60s after the initial deployment before the service works) + +.exercise[ + +- Check the node port allocated to the service: + ```bash + kubectl get service java4ever-tomcat + PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort}) + ``` + +- Connect to it, checking the demo app on `/sample/`: + ```bash + curl localhost:$PORT/sample/ + ``` + +] diff --git a/slides/pks/httpenv-update.md b/slides/pks/httpenv-update.md new file mode 100644 index 000000000..de8ea6b84 --- /dev/null +++ b/slides/pks/httpenv-update.md @@ -0,0 +1,387 @@ +# Rolling updates + +- By default (without rolling updates), when a scaled resource is updated: + + - new pods are created + + - old pods are terminated + + - ... all at the same time + + - if something goes wrong, ¯\\\_(ツ)\_/¯ + +--- + +## Rolling updates + +- With rolling updates, when a Deployment is updated, it happens progressively + +- The Deployment controls multiple Replica Sets + +- Each Replica Set is a group of identical Pods + + (with the same image, arguments, parameters ...) + +- During the rolling update, we have at least two Replica Sets: + + - the "new" set (corresponding to the "target" version) + + - at least one "old" set + +- We can have multiple "old" sets + + (if we start another update before the first one is done) + +--- + +## Update strategy + +- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge` + +- They can be specified in absolute number of pods, or percentage of the `replicas` count + +- At any given time ... 
+ + - there will always be at least `replicas`-`maxUnavailable` pods available + + - there will never be more than `replicas`+`maxSurge` pods in total + + - there will therefore be up to `maxUnavailable`+`maxSurge` pods being updated + +- We have the possibility of rolling back to the previous version +
(if the update fails or is unsatisfactory in any way) + +--- + +## Checking current rollout parameters + +- Recall how we build custom reports with `kubectl` and `jq`: + +.exercise[ + +- Show the rollout plan for our deployments: + ```bash + kubectl get deploy -o json | + jq ".items[] | {name:.metadata.name} + .spec.strategy.rollingUpdate" + ``` + +] + +--- + +## Rolling updates in practice + +- As of Kubernetes 1.8, we can do rolling updates with: + + `deployments`, `daemonsets`, `statefulsets` + +- Editing one of these resources will automatically result in a rolling update + +- Rolling updates can be monitored with the `kubectl rollout` subcommand + +--- + +## Rolling out the new `worker` service + +.exercise[ + +- Let's monitor what's going on by opening a few terminals, and run: + ```bash + kubectl get pods -w + kubectl get replicasets -w + kubectl get deployments -w + ``` + + + +- Update `httpenv` either with `kubectl edit`, or by running: + ```bash + kubectl set env -e "hello=world" deployment httpenv + ``` +] +-- + + +Deployments treat environment variable changes as a upgrade. You should see the rollout occur. + +--- + +## Verify rollout + +- Remember our `httpenv` app prints out our env variables... + +.exercise[ + +- get the IP of the service: + ```bash + IP=`kubectl get svc httpenv \ + -o jsonpath="{.status.loadBalancer.ingress[*].ip}"` + echo $IP + ``` + +- check the app now shows this new environment variable: + + ```bash + curl $IP:8888 + ``` + or + ```bash + curl -s $IP:8888 | jq .hello + ``` +] + +-- + +"hello": "world" + +--- + +## Rolling out something invalid + +- What happens if we make a mistake? + +.exercise[ + +- Update `httpenv` by specifying a non-existent image: + ```bash + kubectl set image deploy httpenv httpenv=not-a-real-image + ``` + +- Check what's going on: + ```bash + kubectl rollout status deploy httpenv + ``` + + + +] + +-- + +Our rollout is stuck. However, the app is not dead. + +--- + +## What's going on with our rollout? + +- Let's look at our app: + +.exercise[ + + - Check our pods: + ```bash + kubectl get pods + ``` +] + +-- + +We have 8 running pods, and 5 failing pods. + +--- + +Why do we have 8 running pods? we should have 10 + +- Because `MaxUnavailable=25%` + + ... So the rollout terminated 2 replicas out of 10 available + +- Okay, but why do we see 5 new replicas being rolled out? + +- Because `MaxSurge=25%` + + ... So in addition to replacing 2 replicas, the rollout is also starting 3 more + +- It rounded down the number of MaxUnavailable pods conservatively, +
+ but the total number of pods being rolled out is allowed to be 25+25=50% + +--- + +class: extra-details + +## The nitty-gritty details + +- We start with 10 pods running for the `httpenv` deployment + +- Current settings: MaxUnavailable=25% and MaxSurge=25% + +- When we start the rollout: + + - two replicas are taken down (as per MaxUnavailable=25%) + - two others are created (with the new version) to replace them + - three others are created (with the new version) per MaxSurge=25%) + +- Now we have 8 replicas up and running, and 5 being deployed + +- Our rollout is stuck at this point! + +--- + +## Recovering from a bad rollout + +- We could push the missing image to our registry + + (the pod retry logic will eventually catch it and the rollout will proceed) + +- Or we could invoke a manual rollback + +.exercise[ + + + +- Cancel the deployment and wait for the dust to settle: + ```bash + kubectl rollout undo deploy httpenv + kubectl rollout status deploy httpenv + ``` + +] + +--- + +## Rolling back to an older version + +- We reverted to our original working image :) + +- We have 10 replicas running again. + +--- + +## Multiple "undos" + +- What happens if we try `kubectl rollout undo` again? + +.exercise[ + +- Try it: + ```bash + kubectl rollout undo deployment httpenv + ``` + +- Check the web UI, the list of pods ... + +] + +🤔 That didn't work. + +--- + +## Multiple "undos" don't work + +- If we see successive versions as a stack: + + - `kubectl rollout undo` doesn't "pop" the last element from the stack + + - it copies the N-1th element to the top + +- Multiple "undos" just swap back and forth between the last two versions! + +.exercise[ + +- Go back to the original version again: + ```bash + kubectl rollout undo deployment httpenv + ``` +] + +--- + +## Listing versions + +- We can list successive versions of a Deployment with `kubectl rollout history` + +.exercise[ + +- Look at our successive versions: + ```bash + kubectl rollout history deployment httpenv + ``` + +] + +We don't see *all* revisions. + +We might see something like 1, 4, 5. + +(Depending on how many "undos" we did before.) + +--- + +## Explaining deployment revisions + +- These revisions correspond to our Replica Sets + +- This information is stored in the Replica Set annotations + +.exercise[ + +- Check the annotations for our replica sets: + ```bash + kubectl describe replicasets -l app=httpenv | grep -A3 ^Annotations + ``` + +] + +--- + +class: extra-details + +## What about the missing revisions? + +- The missing revisions are stored in another annotation: + + `deployment.kubernetes.io/revision-history` + +- These are not shown in `kubectl rollout history` + +- We could easily reconstruct the full list with a script + + (if we wanted to!) + +--- + +## Rolling back to an older version + +- `kubectl rollout undo` can work with a revision number + +.exercise[ + +- Roll back to the "known good" deployment version: + ```bash + kubectl rollout undo deployment httpenv --to-revision=1 + ``` + +- Check the web UI via curl again + ```bash + curl $IP:8888 + ``` +-- + +the `hello world` environment variable has gone as we're right back to the original revision of our application. + +] + +--- + +## Cleanup + +.exercise[ + +- Delete all of the deployments, services, and cronjobs: + + ```bash + kubectl delete deployments,cronjobs,services --all + ``` + +] + +-- + +Using `--all` on a delete is really destructive, be very careful with it. 
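A gentler alternative is to scope the delete with a label selector instead of `--all`; for instance, assuming the `app=httpenv` label that `kubectl create deployment` added to our resources:

```bash
# only deletes objects labeled app=httpenv in the current namespace
kubectl delete deployments,services -l app=httpenv
```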
diff --git a/slides/pks/kubectlexpose.md b/slides/pks/kubectlexpose.md index 38b762e3f..09e176735 100644 --- a/slides/pks/kubectlexpose.md +++ b/slides/pks/kubectlexpose.md @@ -53,6 +53,44 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` --- +class: extra-details + +## If we don't need a clusterIP load balancer + +- Sometimes, we want to access our scaled services directly: + + - if we want to save a tiny little bit of latency (typically less than 1ms) + + - if we need to connect over arbitrary ports (instead of a few fixed ones) + + - if we need to communicate over another protocol than UDP or TCP + + - if we want to decide how to balance the requests client-side + + - ... + +- In that case, we can use a "headless service" + +--- + +class: extra-details + +## Headless services + +- A headless service is obtained by setting the `clusterIP` field to `None` + + (Either with `--cluster-ip=None`, or by providing a custom YAML) + +- As a result, the service doesn't have a virtual IP address + +- Since there is no virtual IP address, there is no load balancer either + +- CoreDNS will return the pods' IP addresses as multiple `A` records + +- This gives us an easy way to discover all the replicas for a deployment + +--- + ## Running containers with open ports - Since `ping` doesn't have anything to connect to, we'll have to run something else @@ -93,9 +131,9 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` kubectl create deployment httpenv --image=jpetazzo/httpenv ``` -- Scale it to 3 replicas: +- Scale it to 10 replicas: ```bash - kubectl scale deployment httpenv --replicas=3 + kubectl scale deployment httpenv --replicas=10 ``` ] @@ -115,11 +153,20 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` - Look up which IP address was allocated: ```bash - kubectl get service + kubectl get service httpenv ``` ] +-- + +The cluster IP is a private IP, you can't access it. + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +httpenv ClusterIP 10.100.200.147 8888/TCP 3m +``` + --- ## Services are layer 4 constructs @@ -142,36 +189,17 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` ## Testing our service -- We will now send a few HTTP requests to our pods - -.exercise[ +- Our service is listening to a private **ClusterIP**. -- Let's obtain the IP address that was allocated for our service, *programmatically:* - ```bash - IP=$(kubectl get svc httpenv -o go-template --template '{{ .spec.clusterIP }}') - ``` +- If we want to access it we need to expose it as a **NodePort** or a **LoadBalancer** -- Send a few requests: - ```bash - curl http://$IP:8888/ - ``` - -- Too much output? Filter it with `jq`: - ```bash - curl -s http://$IP:8888/ | jq .HOSTNAME - ``` - -] - --- - -Oh right, that doesn't work, its a `cluster-ip`. We need another way to access it. +- Or you can cheat and forward a port using `kubectl port-forward` --- ## port forwarding -- You can forward a local port from your machine into a pod +- Forwards a local port from your machine into a pod .exercise[ @@ -198,44 +226,6 @@ The response was the same from each request. 
This is because `kubectl port-forwa class: extra-details -## If we don't need a clusterIP load balancer - -- Sometimes, we want to access our scaled services directly: - - - if we want to save a tiny little bit of latency (typically less than 1ms) - - - if we need to connect over arbitrary ports (instead of a few fixed ones) - - - if we need to communicate over another protocol than UDP or TCP - - - if we want to decide how to balance the requests client-side - - - ... - -- In that case, we can use a "headless service" - ---- - -class: extra-details - -## Headless services - -- A headless service is obtained by setting the `clusterIP` field to `None` - - (Either with `--cluster-ip=None`, or by providing a custom YAML) - -- As a result, the service doesn't have a virtual IP address - -- Since there is no virtual IP address, there is no load balancer either - -- CoreDNS will return the pods' IP addresses as multiple `A` records - -- This gives us an easy way to discover all the replicas for a deployment - ---- - -class: extra-details - ## Services and endpoints - A service has a number of "endpoints" @@ -324,40 +314,85 @@ error: the server doesn't have a resource type "endpoint" .exercise[ -- Set the service to be of type `Loadbalancer`: +- Take a copy of the httpenv service: ```bash - kubectl patch svc httpenv -p '{"spec": {"type": "LoadBalancer"}}' + kubectl get svc httpenv -o yaml > /tmp/httpenv.yaml ``` -- Check for the IP of the loadbalancer: - ```bash - kubectl get svc httpenv - ``` +- Edit `/tmp/httpenv.yaml` and set the service to be of type `Loadbalancer`, and update the ports: + +```yaml +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8888 + type: LoadBalancer + +``` -- Test access via the loadbalancer: - ```bash - curl :8888 - ``` ] -- -The `kubectl patch` command lets you patch a kubernetes resource to make minor changes like the above modification of the service type. +this is what a kubernetes manifest looks like! + +--- + +## Service Manifest + +.exercise[ + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: httpenv + name: httpenv +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8888 + name: http + selector: + app: httpenv + type: LoadBalancer +``` + +] --- -## Cleanup +## kubectl apply .exercise[ -- Delete the service + - Apply your changes: ```bash kubectl delete svc httpenv + kubectl apply -f /tmp/httpenv.yaml ``` -- Delete the deployment +] + +-- + +Why did we delete the svc? Running a `kubectl apply` on a imperatively created resource can cause problems. + +--- + +## yay loadbalancing + +.exercise[ +- Check for the IP of the loadbalancer: ```bash - kubectl delete deployment httpenv + kubectl get svc httpenv ``` +- Test access via the loadbalancer: + ```bash + curl :8888 + ``` ] diff --git a/slides/pks/kubectlget.md b/slides/pks/kubectlget.md index 65e8c4eca..ff283359f 100644 --- a/slides/pks/kubectlget.md +++ b/slides/pks/kubectlget.md @@ -172,8 +172,7 @@ class: extra-details - Look at the information available for `node1` with one of the following commands: ```bash - kubectl describe node/node1 - kubectl describe node node1 + kubectl describe \`k get node -o name | head -1` ``` ] @@ -215,7 +214,7 @@ There should be no services. This is because you're not running anything yet. 
Bu - List the services on our cluster with one of these commands: ```bash kubectl get services --all-namespaces - kubectl get svc --all-namespaces + kubectl get svc -A ``` ] @@ -232,22 +231,7 @@ There's a bunch of services already running that are used in the operations of t - This is useful for introspection from within containers -.exercise[ - -- Try to connect to the API: - ```bash - curl -k https://`10.100.200.1` - ``` - - - `-k` is used to skip certificate verification - - - Make sure to replace 10.100.200.1 with the CLUSTER-IP for the `kubernetes` service shown by `kubectl get svc` - -] - --- - -The Cluster IP is only accessible from inside the cluster. We'll explore other ways to expose a service later. +*The Cluster IP is only accessible from inside the cluster. We'll explore other ways to expose a service later.* --- @@ -293,11 +277,11 @@ The Cluster IP is only accessible from inside the cluster. We'll explore other w -- -*You know what ... This `kube-system` thing looks suspicious.* +*You know what ... This `kube-system` thing looks interesting.* *In fact, I'm pretty sure it showed up earlier, when we did:* -`kubectl describe node node1` +`kubectl describe node` --- diff --git a/slides/pks/kubectlrun.md b/slides/pks/kubectlrun.md new file mode 100644 index 000000000..5a437a9ce --- /dev/null +++ b/slides/pks/kubectlrun.md @@ -0,0 +1,614 @@ +# Running our first containers on Kubernetes + +- First things first: we cannot run a container + +-- + +- We are going to run a pod, and in that pod there will be a single container + +-- + +- In that container in the pod, we are going to run a simple `ping` command + +- Then we are going to start additional copies of the pod + +--- + +## Starting a simple pod with `kubectl run` + +- We need to specify at least a *name* and the image we want to use + +.exercise[ + +- Let's ping the address of `localhost`, the loopback interface: + ```bash + kubectl run pingpong --image alpine ping 127.0.0.1 + ``` + + + +] + +-- + +(Starting with Kubernetes 1.12, we get a message telling us that +`kubectl run` is deprecated. Let's ignore it for now.) + +--- + +## Behind the scenes of `kubectl run` + +- Let's look at the resources that were created by `kubectl run` + +.exercise[ + +- List most resource types: + ```bash + kubectl get all + ``` + +] + +-- + +We should see the following things: +- `deployment.apps/pingpong` (the *deployment* that we just created) +- `replicaset.apps/pingpong-xxxxxxxxxx` (a *replica set* created by the deployment) +- `pod/pingpong-xxxxxxxxxx-yyyyy` (a *pod* created by the replica set) + +Note: as of 1.10.1, resource types are displayed in more detail. + +--- + +## What are these different things? 
+ +- A *deployment* is a high-level construct + + - allows scaling, rolling updates, rollbacks + + - multiple deployments can be used together to implement a + [canary deployment](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments) + + - delegates pods management to *replica sets* + +- A *replica set* is a low-level construct + + - makes sure that a given number of identical pods are running + + - allows scaling + + - rarely used directly + +- A *replication controller* is the (deprecated) predecessor of a replica set + +--- + +## Our `pingpong` deployment + +- `kubectl run` created a *deployment*, `deployment.apps/pingpong` + +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +deployment.apps/pingpong 1 1 1 1 10m +``` + +- That deployment created a *replica set*, `replicaset.apps/pingpong-xxxxxxxxxx` + +``` +NAME DESIRED CURRENT READY AGE +replicaset.apps/pingpong-7c8bbcd9bc 1 1 1 10m +``` + +- That replica set created a *pod*, `pod/pingpong-xxxxxxxxxx-yyyyy` + +``` +NAME READY STATUS RESTARTS AGE +pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m +``` + +- We'll see later how these folks play together for: + + - scaling, high availability, rolling updates + +--- + +## Viewing container output + +- Let's use the `kubectl logs` command + +- We will pass either a *pod name*, or a *type/name* + + (E.g. if we specify a deployment or replica set, it will get the first pod in it) + +- Unless specified otherwise, it will only show logs of the first container in the pod + + (Good thing there's only one in ours!) + +.exercise[ + +- View the result of our `ping` command: + ```bash + kubectl logs deploy/pingpong + ``` + +] + +--- + +## Streaming logs in real time + +- Just like `docker logs`, `kubectl logs` supports convenient options: + + - `-f`/`--follow` to stream logs in real time (à la `tail -f`) + + - `--tail` to indicate how many lines you want to see (from the end) + + - `--since` to get logs only after a given timestamp + +.exercise[ + +- View the latest logs of our `ping` command: + ```bash + kubectl logs deploy/pingpong --tail 1 --follow + ``` + +- Leave that command running, so that we can keep an eye on these logs + + + +] + +--- + +## Scaling our application + +- We can create additional copies of our container (I mean, our pod) with `kubectl scale` + +.exercise[ + +- Scale our `pingpong` deployment: + ```bash + kubectl scale deploy/pingpong --replicas 3 + ``` + +- Note that this command does exactly the same thing: + ```bash + kubectl scale deployment pingpong --replicas 3 + ``` + +] + +Note: what if we tried to scale `replicaset.apps/pingpong-xxxxxxxxxx`? + +We could! But the *deployment* would notice it right away, and scale back to the initial level. + +--- + +## Log streaming + +- Let's look again at the output of `kubectl logs` + + (the one we started before scaling up) + +- `kubectl logs` shows us one line per second + +- We could expect 3 lines per second + + (since we should now have 3 pods running `ping`) + +- Let's try to figure out what's happening! + +--- + +## Streaming logs of multiple pods + +- What happens if we restart `kubectl logs`? + +.exercise[ + +- Interrupt `kubectl logs` (with Ctrl-C) + + + +- Restart it: + ```bash + kubectl logs deploy/pingpong --tail 1 --follow + ``` + + + +] + +`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them. + +Let's leave `kubectl logs` running while we keep exploring. 
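+
+If you want to see which pods actually sit behind the deployment while those
+logs stream, here is a quick sketch (it assumes the `run=pingpong` label that
+`kubectl run` adds to the objects it creates):
+
+```bash
+# List the pods created for the pingpong deployment
+# (the run=pingpong label is set automatically by `kubectl run`)
+kubectl get pods -l run=pingpong -o wide
+```
+
+Knowing the exact pod names will help in the next section, when we delete one
+of them and watch it come back.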
+ +--- + + +## Resilience + +- The *deployment* `pingpong` watches its *replica set* + +- The *replica set* ensures that the right number of *pods* are running + +- What happens if pods disappear? + +.exercise[ + +- In a separate window, watch the list of pods: + ```bash + watch kubectl get pods + ``` + + + +- Destroy the pod currently shown by `kubectl logs`: + ``` + kubectl delete pod pingpong-xxxxxxxxxx-yyyyy + ``` + + + +] + +--- + +## What happened? + +- `kubectl delete pod` terminates the pod gracefully + + (sending it the TERM signal and waiting for it to shutdown) + +- As soon as the pod is in "Terminating" state, the Replica Set replaces it + +- But we can still see the output of the "Terminating" pod in `kubectl logs` + +- Until 30 seconds later, when the grace period expires + +- The pod is then killed, and `kubectl logs` exits + +--- + + +## What if we wanted something different? + +- What if we wanted to start a "one-shot" container that *doesn't* get restarted? + +- We could use `kubectl run --restart=OnFailure` or `kubectl run --restart=Never` + +- These commands would create *jobs* or *pods* instead of *deployments* + +- Under the hood, `kubectl run` invokes "generators" to create resource descriptions + +- We could also write these resource descriptions ourselves (typically in YAML), +
and create them on the cluster with `kubectl apply -f` (discussed later) + +- With `kubectl run --schedule=...`, we can also create *cronjobs* + +--- + +## Scheduling periodic background work + +- A Cron Job is a job that will be executed at specific intervals + + (the name comes from the traditional cronjobs executed by the UNIX crond) + +- It requires a *schedule*, represented as five space-separated fields: + + - minute [0,59] + - hour [0,23] + - day of the month [1,31] + - month of the year [1,12] + - day of the week ([0,6] with 0=Sunday) + +- `*` means "all valid values"; `/N` means "every N" + +- Example: `*/3 * * * *` means "every three minutes" + +--- + +## Creating a Cron Job + +- Let's create a simple job to be executed every three minutes + +- Cron Jobs need to terminate, otherwise they'd run forever + +.exercise[ + +- Create the Cron Job: + ```bash + kubectl create cronjob every3mins --image alpine \ + --schedule='*/3 * * * *' --restart OnFailure \ + -- ping -c 3 1.1.1.1 + ``` + +- Check the resource that was created: + ```bash + kubectl get cronjobs + ``` + +] + +--- + +## Cron Jobs in action + +- At the specified schedule, the Cron Job will create a Job + +- The Job will create a Pod + +- The Job will make sure that the Pod completes + + (re-creating another one if it fails, for instance if its node fails) + +.exercise[ + +- Check the Jobs that are created: + ```bash + kubectl get jobs + ``` + +] + +(It will take a few minutes before the first job is scheduled.) + +--- + + +## What about that deprecation warning? + +- As we can see from the previous slide, `kubectl run` can do many things + +- The exact type of resource created is not obvious + +- To make things more explicit, it is better to use `kubectl create`: + + - `kubectl create deployment` to create a deployment + + - `kubectl create job` to create a job + + - `kubectl create cronjob` to run a job periodically +
(since Kubernetes 1.14) + +- Eventually, `kubectl run` will be used only to start one-shot pods + + (see https://github.com/kubernetes/kubernetes/pull/68132) + +--- + +## Various ways of creating resources + +- `kubectl run` + + - easy way to get started + - versatile + +- `kubectl create ` + + - explicit, but lacks some features + - can't create a CronJob before Kubernetes 1.14 + - can't pass command-line arguments to deployments + +- `kubectl create -f foo.yaml` or `kubectl apply -f foo.yaml` + + - all features are available + - requires writing YAML + +--- + +## kubectl create pingpong + +How could we replace the `kubectl run` for the original pingpong deployment ? + +- `kubectl create deployment` doesn't let you specify command/args for the container. + +- We could run `kubectl create deployment pingpong --image alpine --dry-run -o yaml > /tmp/pingpong.yaml` and then modify the manifest. + +- We could use `kubectl patch`: + +```bash +kubectl create deployment pingpong2 --image alpine +kubectl patch deployment pingpong2 -p ' +{"spec":{"template": {"spec": {"containers": +[{"name":"alpine","image":"alpine","command": +["ping","1.1.1.1"]}]}}}}' + +``` + +-- + +Yay JSON on the commandline + +--- + +## Viewing logs of multiple pods + +- When we specify a deployment name, only one single pod's logs are shown + +- We can view the logs of multiple pods by specifying a *selector* + +- A selector is a logic expression using *labels* + +- Conveniently, when you `kubectl run somename`, the associated objects have a `run=somename` label + +.exercise[ + +- View the last line of log from all pods with the `run=pingpong` label: + ```bash + kubectl logs -l run=pingpong --tail 1 + ``` + +] + +--- + +### Streaming logs of multiple pods + +- Can we stream the logs of all our `pingpong` pods? + +.exercise[ + +- Combine `-l` and `-f` flags: + ```bash + kubectl logs -l run=pingpong --tail 1 -f + ``` + + + +] + +*Note: combining `-l` and `-f` is only possible since Kubernetes 1.14!* + +*Let's try to understand why ...* + +--- + +class: extra-details + +### Streaming logs of many pods + +- Let's see what happens if we try to stream the logs for more than 5 pods + +.exercise[ + +- Scale up our deployment: + ```bash + kubectl scale deployment pingpong --replicas=8 + ``` + +- Stream the logs: + ```bash + kubectl logs -l run=pingpong --tail 1 -f + ``` + + + +] + +We see a message like the following one: +``` +error: you are attempting to follow 8 log streams, +but maximum allowed concurency is 5, +use --max-log-requests to increase the limit +``` + +--- + +class: extra-details + +## Why can't we stream the logs of many pods? 
+ +- `kubectl` opens one connection to the API server per pod + +- For each pod, the API server opens one extra connection to the corresponding kubelet + +- If there are 1000 pods in our deployment, that's 1000 inbound + 1000 outbound connections on the API server + +- This could easily put a lot of stress on the API server + +- Prior Kubernetes 1.14, it was decided to *not* allow multiple connections + +- From Kubernetes 1.14, it is allowed, but limited to 5 connections + + (this can be changed with `--max-log-requests`) + +- For more details about the rationale, see + [PR #67573](https://github.com/kubernetes/kubernetes/pull/67573) + +--- + +## Shortcomings of `kubectl logs` + +- We don't see which pod sent which log line + +- If pods are restarted / replaced, the log stream stops + +- If new pods are added, we don't see their logs + +- To stream the logs of multiple pods, we need to write a selector + +- There are external tools to address these shortcomings + + (e.g.: [Stern](https://github.com/wercker/stern)) + +--- + +class: extra-details + +## `kubectl logs -l ... --tail N` + +- If we run this with Kubernetes 1.12, the last command shows multiple lines + +- This is a regression when `--tail` is used together with `-l`/`--selector` + +- It always shows the last 10 lines of output for each container + + (instead of the number of lines specified on the command line) + +- The problem was fixed in Kubernetes 1.13 + +*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.* + +--- + +class: extra-details + +## Party tricks involving IP addresses + +- It is possible to specify an IP address with less than 4 bytes + + (example: `127.1`) + +- Zeroes are then inserted in the middle + +- As a result, `127.1` expands to `127.0.0.1` + +- So we can `ping 127.1` to ping `localhost`! + +(See [this blog post](https://ma.ttias.be/theres-more-than-one-way-to-write-an-ip-address/ +) for more details.) + +--- + +class: extra-details + +## More party tricks with IP addresses + +- We can also ping `1.1` + +- `1.1` will expand to `1.0.0.1` + +- This is one of the addresses of Cloudflare's + [public DNS resolver](https://blog.cloudflare.com/announcing-1111/) + +- This is a quick way to check connectivity + + (if we can reach 1.1, we probably have internet access) diff --git a/slides/pks/kubercoins.md b/slides/pks/kubercoins.md new file mode 100644 index 000000000..3220f5fa4 --- /dev/null +++ b/slides/pks/kubercoins.md @@ -0,0 +1,244 @@ +# Deploying a sample application + +- We will connect to our new Kubernetes cluster + +- We will deploy a sample application, "DockerCoins" + +- That app features multiple micro-services and a web UI + +--- + +## Connecting to our Kubernetes cluster + +- Our cluster has multiple nodes named `node1`, `node2`, etc. + +- We will do everything from `node1` + +- We have SSH access to the other nodes, but won't need it + + (but we can use it for debugging, troubleshooting, etc.) + +.exercise[ + +- Log into `node1` + +- Check that all nodes are `Ready`: + ```bash + kubectl get nodes + ``` + +] + +--- + +## Cloning some repos + +- We will need two repositories: + + - the first one has the "DockerCoins" demo app + + - the second one has these slides, some scripts, more manifests ... 
+ +.exercise[ + +- Clone the kubercoins repository on `node1`: + ```bash + git clone https://github.com/jpetazzo/kubercoins + ``` + + +- Clone the container.training repository as well: + ```bash + git clone https://@@GITREPO@@ + ``` + +] + +--- + +## Running the application + +Without further ado, let's start this application! + +.exercise[ + +- Apply all the manifests from the kubercoins repository: + ```bash + kubectl apply -f kubercoins/ + ``` + +] + +--- + +## What's this application? + +-- + +- It is a DockerCoin miner! .emoji[💰🐳📦🚢] + +-- + +- No, you can't buy coffee with DockerCoins + +-- + +- How DockerCoins works: + + - generate a few random bytes + + - hash these bytes + + - increment a counter (to keep track of speed) + + - repeat forever! + +-- + +- DockerCoins is *not* a cryptocurrency + + (the only common points are "randomness", "hashing", and "coins" in the name) + +--- + +## DockerCoins in the microservices era + +- DockerCoins is made of 5 services: + + - `rng` = web service generating random bytes + + - `hasher` = web service computing hash of POSTed data + + - `worker` = background process calling `rng` and `hasher` + + - `webui` = web interface to watch progress + + - `redis` = data store (holds a counter updated by `worker`) + +- These 5 services are visible in the application's Compose file, + [docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) + +--- + +## How DockerCoins works + +- `worker` invokes web service `rng` to generate random bytes + +- `worker` invokes web service `hasher` to hash these bytes + +- `worker` does this in an infinite loop + +- every second, `worker` updates `redis` to indicate how many loops were done + +- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser + +*(See diagram on next slide!)* + +--- + +class: pic + +![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) + +--- + +## Service discovery in container-land + +How does each service find out the address of the other ones? + +-- + +- We do not hard-code IP addresses in the code + +- We do not hard-code FQDNs in the code, either + +- We just connect to a service name, and container-magic does the rest + + (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") + +--- + +## Example in `worker/worker.py` + +```python +redis = Redis("`redis`") + + +def get_random_bytes(): + r = requests.get("http://`rng`/32") + return r.content + + +def hash_bytes(data): + r = requests.post("http://`hasher`/", + data=data, + headers={"Content-Type": "application/octet-stream"}) +``` + +(Full source code available [here]( +https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 +)) + +--- + +## Show me the code! + +- You can check the GitHub repository with all the materials of this workshop: +
https://@@GITREPO@@ + +- The application is in the [dockercoins]( + https://@@GITREPO@@/tree/master/dockercoins) + subdirectory + +- The Compose file ([docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) + lists all 5 services + +- `redis` is using an official image from the Docker Hub + +- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile + +- Each service's Dockerfile and source code is in its own directory + + (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, + `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) + directory, etc.) + +--- + +## Our application at work + +- We can check the logs of our application's pods + +.exercise[ + +- Check the logs of the various components: + ```bash + kubectl logs deploy/worker + kubectl logs deploy/hasher + ``` + +] + +--- + +## Connecting to the web UI + +- "Logs are exciting and fun!" (No-one, ever) + +- The `webui` container exposes a web dashboard; let's view it + +.exercise[ + +- Check the NodePort allocated to the web UI: + ```bash + kubectl get svc webui + ``` + +- Open that in a web browser + +] + +A drawing area should show up, and after a few seconds, a blue +graph will appear. diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md index e8071a57b..5aa248637 100644 --- a/slides/pks/logistics.md +++ b/slides/pks/logistics.md @@ -2,8 +2,8 @@ - Hello! We are: - - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), Pivotal Software) - - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), Pivotal Software) + - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), VMware) + - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), VMWare) - Feel free to interrupt for questions at any time diff --git a/slides/pks/logs-centralized.md b/slides/pks/logs-centralized.md new file mode 100644 index 000000000..07af0ce3a --- /dev/null +++ b/slides/pks/logs-centralized.md @@ -0,0 +1,147 @@ +# Centralized logging + +- Using `kubectl` or `stern` is simple; but it has drawbacks: + + - when a node goes down, its logs are not available anymore + + - we can only dump or stream logs; we want to search/index/count... + +- We want to send all our logs to a single place + +- We want to parse them (e.g. for HTTP logs) and index them + +- We want a nice web dashboard + +-- + +- We are going to deploy an EFK stack + +--- + +## What is EFK? + +- EFK is three components: + + - ElasticSearch (to store and index log entries) + + - Fluentd (to get container logs, process them, and put them in ElasticSearch) + + - Kibana (to view/search log entries with a nice UI) + +- The only component that we need to access from outside the cluster will be Kibana + +--- + +## Deploying EFK on our cluster + +- We are going to use a YAML file describing all the required resources + +.exercise[ + +- Load the YAML file into our cluster: + ```bash + kubectl apply -f ~/container.training/k8s/efk.yaml + ``` + +] + +If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that +it creates a daemon set, two deployments, two services, +and a few roles and role bindings (to give fluentd the required permissions). 
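+
+To double-check what actually landed in the cluster, one option (a quick
+sketch, assuming the same file path as in the exercise above) is to ask
+`kubectl` to list only the resources described in that file:
+
+```bash
+# Show the live resources defined in the EFK manifest
+kubectl get -f ~/container.training/k8s/efk.yaml
+```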
+ +--- + +## The itinerary of a log line (before Fluentd) + +- A container writes a line on stdout or stderr + +- Both are typically piped to the container engine (Docker or otherwise) + +- The container engine reads the line, and sends it to a logging driver + +- The timestamp and stream (stdout or stderr) is added to the log line + +- With the default configuration for Kubernetes, the line is written to a JSON file + + (`/var/log/containers/pod-name_namespace_container-id.log`) + +- That file is read when we invoke `kubectl logs`; we can access it directly too + +--- + +## The itinerary of a log line (with Fluentd) + +- Fluentd runs on each node (thanks to a daemon set) + +- It bind-mounts `/var/log/containers` from the host (to access these files) + +- It continuously scans this directory for new files; reads them; parses them + +- Each log line becomes a JSON object, fully annotated with extra information: +
container id, pod name, Kubernetes labels... + +- These JSON objects are stored in ElasticSearch + +- ElasticSearch indexes the JSON objects + +- We can access the logs through Kibana (and perform searches, counts, etc.) + +--- + +## Accessing Kibana + +- Kibana offers a web interface that is relatively straightforward + +- Let's check it out! + +.exercise[ + +- Check which `NodePort` was allocated to Kibana: + ```bash + kubectl get svc kibana + ``` + +- With our web browser, connect to Kibana + +] + +--- + +## Using Kibana + +*Note: this is not a Kibana workshop! So this section is deliberately very terse.* + +- The first time you connect to Kibana, you must "configure an index pattern" + +- Just use the one that is suggested, `@timestamp`.red[*] + +- Then click "Discover" (in the top-left corner) + +- You should see container logs + +- Advice: in the left column, select a few fields to display, e.g.: + + `kubernetes.host`, `kubernetes.pod_name`, `stream`, `log` + +.red[*]If you don't see `@timestamp`, it's probably because no logs exist yet. +
Wait a bit, and double-check the logging pipeline! + +--- + +## Caveat emptor + +We are using EFK because it is relatively straightforward +to deploy on Kubernetes, without having to redeploy or reconfigure +our cluster. But it doesn't mean that it will always be the best +option for your use-case. If you are running Kubernetes in the +cloud, you might consider using the cloud provider's logging +infrastructure (if it can be integrated with Kubernetes). + +The deployment method that we will use here has been simplified: +there is only one ElasticSearch node. In a real deployment, you +might use a cluster, both for performance and reliability reasons. +But this is outside of the scope of this chapter. + +The YAML file that we used creates all the resources in the +`default` namespace, for simplicity. In a real scenario, you will +create the resources in the `kube-system` namespace or in a dedicated namespace. diff --git a/slides/pks/octant.md b/slides/pks/octant.md index 2f854fb53..95e80c914 100644 --- a/slides/pks/octant.md +++ b/slides/pks/octant.md @@ -2,7 +2,7 @@ Octant is an open source tool from VMWare which is designed to be a Kubernetes workload visualization tool that runs locally and uses your Kubeconfig to connect to the Kubernetes cluster. -Octant only ever performs list and read style requests and does not create/modify/delete resources. This makes it a much safer tool to use than the Kubernetes Dashboard. +Since Octant runs locally on your machine and only uses your kube credentials its [in theory at least] more secure than the kubernetes dashboard. .exercise[ @@ -10,5 +10,8 @@ Octant only ever performs list and read style requests and does not create/modif ```bash octant ``` +] -] \ No newline at end of file +-- + +*We can use Octant through the workshop to see our resources running in Kubernetes. If you don't have it already installed, you can ignore it.* \ No newline at end of file diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md index a969f44b5..8d8a7aec7 100644 --- a/slides/pks/prereqs.md +++ b/slides/pks/prereqs.md @@ -85,11 +85,10 @@ class: in-person - A large Pivotal Container Service (PKS) cluster deployed to Google Cloud. -- It remain up for the duration of the workshop +- It will remain up for the duration of the workshop (and maybe a few days beyond) -- You should have a little card with login+password+URL +- You should have a credentials to log into the cluster. -- Logging into this URL will give you a downloadable kubeconfig file. --- diff --git a/slides/pks/sampleapp.md b/slides/pks/sampleapp.md new file mode 100644 index 000000000..4da4643ec --- /dev/null +++ b/slides/pks/sampleapp.md @@ -0,0 +1,145 @@ +# Our sample application + +- DockerCoins + +![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) + + +--- + +## What is DockerCoins? + +-- + +- It is a DockerCoin miner! .emoji[💰🐳📦🚢] + +-- + +- No, you can't buy coffee with DockerCoins + +-- + +- How DockerCoins works: + + - generate a few random bytes + - hash these bytes + - increment a counter (to keep track of speed) + - repeat forever! 
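+
+The gist of one "mining" iteration can be sketched with standard shell tools
+(just an illustration; the real worker is a small Python program shown later):
+
+```bash
+# Grab a few random bytes and hash them - one iteration of the loop above
+head -c 32 /dev/urandom | sha256sum
+```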
+ +-- + +- DockerCoins is *not* a cryptocurrency + + (the only common points are "randomness," "hashing," and "coins" in the name) + +--- + +## DockerCoins in the microservices era + +- DockerCoins is made of 5 services: + + - `rng` = web service generating random bytes + - `hasher` = web service computing hash of POSTed data + - `worker` = background process calling `rng` and `hasher` + - `webui` = web interface to watch progress + - `redis` = data store (holds a counter updated by `worker`) + +- These 5 services are visible in the application's Compose file, + [docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) + +--- + +## How DockerCoins works + +- `worker` invokes web service `rng` to generate random bytes + +- `worker` invokes web service `hasher` to hash these bytes + +- `worker` does this in an infinite loop + +- every second, `worker` updates `redis` to indicate how many loops were done + +- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser + +## Service discovery in container-land + +How does each service find out the address of the other ones? + +-- + +- We do not hard-code IP addresses in the code + +- We do not hard-code FQDNs in the code, either + +- We just connect to a service name, and container-magic does the rest + + (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") + +--- + +## Example in `worker/worker.py` + +```python +redis = Redis("`redis`") + + +def get_random_bytes(): + r = requests.get("http://`rng`/32") + return r.content + + +def hash_bytes(data): + r = requests.post("http://`hasher`/", + data=data, + headers={"Content-Type": "application/octet-stream"}) +``` + +(Full source code available [here]( +https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 +)) + +--- + +class: extra-details + +## Links, naming, and service discovery + +- Containers can have network aliases (resolvable through DNS) + +- Compose file version 2+ makes each container reachable through its service name + +- Compose file version 1 required "links" sections to accomplish this + +- Network aliases are automatically namespaced + + - you can have multiple apps declaring and using a service named `database` + + - containers in the blue app will resolve `database` to the IP of the blue database + + - containers in the green app will resolve `database` to the IP of the green database + +--- + +## Show me the code! + +- You can check the GitHub repository with all the materials of this workshop: +
https://@@GITREPO@@ + +- The application is in the [dockercoins]( + https://@@GITREPO@@/tree/master/dockercoins) + subdirectory + +- The Compose file ([docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) + lists all 5 services + +- `redis` is using an official image from the Docker Hub + +- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile + +- Each service's Dockerfile and source code is in its own directory + + (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, + `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) + directory, etc.) diff --git a/slides/pks/security-kubectl-apply.md b/slides/pks/security-kubectl-apply.md new file mode 100644 index 000000000..9be59165d --- /dev/null +++ b/slides/pks/security-kubectl-apply.md @@ -0,0 +1,52 @@ + +# Security implications of `kubectl apply` + +- When we do `kubectl apply -f `, we create arbitrary resources + +- Resources can be evil; imagine a `deployment` that ... + +-- + + - starts bitcoin miners on the whole cluster + +-- + + - hides in a non-default namespace + +-- + + - bind-mounts our nodes' filesystem + +-- + + - inserts SSH keys in the root account (on the node) + +-- + + - encrypts our data and ransoms it + +-- + + - ☠️☠️☠️ + +--- + +## `kubectl apply` is the new `curl | sh` + +- `curl | sh` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +-- + +- `kubectl apply -f` is convenient + +- It's safe if you use HTTPS URLs from trusted sources + +- Example: the official setup instructions for most pod networks + +-- + +- It introduces new failure modes + + (for instance, if you try to apply YAML from a link that's no longer valid) diff --git a/slides/spring-one-tour.yml b/slides/spring-one-tour.yml new file mode 100644 index 000000000..1e06d9f6d --- /dev/null +++ b/slides/spring-one-tour.yml @@ -0,0 +1,62 @@ +title: | + Spring One Tour - Kubernetes Workshop + + +#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" +#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)" +chat: "In person!" 
+ +gitrepo: github.com/paulczar/container.training +gitbranch: + +slides: http://k8s.camp/s1t/ + +#slidenumberprefix: "#SomeHashTag — " + +exclude: +- self-paced + +chapters: +- pks/title.md +- pks/logistics.md +- k8s/intro.md +- shared/about-slides.md +- shared/toc.md + +- + - pks/prereqs.md + - pks/connecting.md + - pks/concepts-k8s.md + + - shared/declarative.md + - k8s/declarative.md + - pks/dashboard.md + - pks/octant.md + - pks/kubectlget.md +- - pks/kubectlrun.md + - k8s/deploymentslideshow.md + - pks/kubectlexpose.md + + #- k8s/shippingimages.md + #- k8s/buildshiprun-selfhosted.md + # - k8s/buildshiprun-dockerhub.md + # - pks/sampleapp.md + - pks/httpenv-update.md + # - pks/ourapponkube.md + + +# - - k8s/logs-cli.md + # - pks/logs-centralized.md + # - k8s/namespaces.md + - pks/helm-intro.md + #- k8s/helm-chart-format.md + - k8s/helm-create-basic-chart.md + #- k8s/helm-create-better-chart.md + #- k8s/helm-secrets.md + #- k8s/kustomize.md + #- k8s/netpol.md + - k8s/whatsnext.md +# - k8s/links.md +# Bridget-specific + - k8s/links-bridget.md + - shared/thankyou.md From f6b15e9f3a405950d74abe8f62fa398f1884c726 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Thu, 30 Apr 2020 12:56:14 -0500 Subject: [PATCH 12/14] fullday-namespaced rewrite Signed-off-by: Paul Czarkowski --- slides/images/you-get-a-namespace.jpg | Bin 0 -> 84955 bytes slides/k8s/kubectlexpose.md | 61 ++- slides/k8s/kubectlget.md | 24 +- slides/k8s/labels-annotations.md | 34 +- slides/k8s/scalingdockercoins.md | 27 +- slides/kube-fullday-namespaced-pks.yml.no | 89 +++++ slides/kube-fullday-namespaced.yml | 76 ++-- slides/kube-fullday.yml | 1 + slides/kube-halfday.yml | 1 + slides/kube-twodays.yml | 1 + slides/namespaced/connecting.md | 106 ++++++ slides/namespaced/handson.md | 136 +++++++ slides/namespaced/ourapponkube.md | 162 ++++++++ slides/namespaced/sampleapp.md | 359 ++++++++++++++++++ slides/shared/handson.md | 133 +++++++ slides/shared/prereqs.md | 136 ------- ...ng-one-tour.yml => spring-one-tour.yml.no} | 0 17 files changed, 1139 insertions(+), 207 deletions(-) create mode 100644 slides/images/you-get-a-namespace.jpg create mode 100644 slides/kube-fullday-namespaced-pks.yml.no create mode 100644 slides/namespaced/connecting.md create mode 100644 slides/namespaced/handson.md create mode 100644 slides/namespaced/ourapponkube.md create mode 100644 slides/namespaced/sampleapp.md create mode 100644 slides/shared/handson.md rename slides/{spring-one-tour.yml => spring-one-tour.yml.no} (100%) diff --git a/slides/images/you-get-a-namespace.jpg b/slides/images/you-get-a-namespace.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4a126921d1a9a171942e9a848ee48d1e374db03 GIT binary patch literal 84955 zcmb5UWmH^E@FzS1gTvrXaEA~)xHDLAcL~9RI|O(4;O_1aAh_$G!CeCc4H1O|CjQAs{nK`5CDRLp|7eAi6oD!*I{>DMC^8MFD9(Jhl89~UF>YEO$$2@=aVZ` z^Q7X?b!;5khM3a2o;prb+4c7a!E7Mn^S3p;2hAO~+ERNO;oO;A#8YbO2}Mp`ZLt{d zf8jT6$_-V`2@RUd`k2TS3g}n8D`*v9tF^UO?C@5c&kXy044YdV+mjlI0@#O5_jtgw z4o;NoNH$kiyf6HukPG~jnZC$U(R$N_FO%!$?ltpMcfI84{{7Szzq-~`Cu*Np!2wTI zfUNq_gdT2B==Q&4O)b9oe1{u&A9b=ydft9#PcY>t!xJXV?8yrL__(>zl>3XKe6o8} zsqCn8^GJPk=r&&Bqf79WwhYC>(PjX=z(X0^rG3gmoA(#Iu1y z;NjcUlkI1Hbk~5z;A(!x?$<8l@zaU_=Mcwk0P#-q%=D83|5J2@^vPkz6WL9RqZjsO zd|Fvq#Qa%qhj8!0-Hf%cxrp8!;lQ5wM@=PP3%i-sO-P`3E_wzYTz>Sa5k`myTq4F$B zWdIp#-(kXv>oG|c z1?c>J;9f)U5`*Z_rz>@x+J%8X6r8!GLhmY$=*xMl+0k@E$#NsFHkX?`y@h=tZ#!(P zFm!U6^YhlobyTSQtv(hHmN42XxGn%;#j98theD^Mnnpo+SU|S0smkgc7>97I1kquk zG|N2@in`T@`5xwuoIbr=K_qMp}CGEkT59ip!rlgg}xa8Nu?c?tPhH4(M 
(binary patch payload for slides/images/you-get-a-namespace.jpg: 84955 bytes of base85-encoded data, truncated here)
zc#Jrl77aQJ6W~jWmH_sf%gd_-i8~_7C{I0o=&UlVO;MAUVVkQYB{r#SyzV3%5p4XG zmm(%*aW0@;7dvc^jECdg6Kf2IvTIgATp|13c59x==`e5#Aq&UZ>LT*C>SUAeqVj*m z#BH&blJoI~G|oA(GXF1xb`-vgUN?jG8r?Of3mjL3byBYaX6MhcCi2$0Wz-Gkf!zLT zjjf}c@+n{j=D@bqVS1t{_|Dg}s|^_d?6)cZ3r!37@FW}oCa`YZ0ooyewq|Lu#_`It z-S}y2IRx=y>uNaS91J7lGdw6YWu}Yvwn>)@|N2sTzG7zQR`b@=kCc)@QOAc z^ej&xh5X`hO%%k;YyjU9PnDxqVyVtfD-aNNh&4&C+*@shp(Wn1g<(@|Om_poKH+v! zvpOl$)Pc0d9EW*j+8)t$GSQu`DAfF+?xNWpkQmoGUlahc?rm1i(Izp$5p)w@S9e8d z_vFYdKhGSe0`rGbScJT@^AQ48ypPxifW`wT6xFy$)Zi{+eg3xH%nw{Pv` z$<9tSiDPYv>+qxBegmdBSys2M_IvZ@npZsP+%tDw*UeWd%u*@opvk+T&%V`$E zU3KcWXKY;_TFc9%SCWP&zQ6+tdJU1SIL*>NL1fTAvS(mYhe*T1}NjB*dJ%Gv(o5M^Qv2X97x~^##o1W9S7)FPShzNnc5zVHBUF}>F#+Wig zW`?cTRz^9#y0;kS9ZdXZQNhD2H0K(H^gjzz`)6nF{GvP1#9=%fIt%IQtN?djc6^2+ z-Pu#|3GMDn9^9|TG%OY~CR#9MV}fe^tNY--rr_?doQL;m@2(D_sWsR(9GR zXT%oe1#T4&pDeuFtChAJbT z2yX-%^QoE}$C0~>yq~_MLzPCuId87MA+E8VWgo~jwKB=MoIBWl)wL2llL8L}b4?Rg zjba1v1YHn1eXW52_=Ci8`n1+?d1R-?x&3a|=AXlI&pHHVk~Wu-eHZ0z6?;1OQ}PCW zGytVASAsToF8tX5pSm*yZ#K}@f>r&52J(`=b=L?Z9K^x|y@KUjm(W}(=ugtS`7CJqx;Uul)X$@Kx8Q_tr9F|@`Kzu@HHhn~(r}RU*1LqvD z)om#$^;C`Nou5?x!hvL{`LNGN7X)hyP01f5vfg}D~ zXJ?fT_qo`E#PqTH)>zY=Ki>o5N_^$&@BZwARYn{dHPwihPt8qcb?3n&(K5x)Tn~?L zz`MHe#^Wm@%ef|~+sJuFCK{E6gNs7LesJpJRo3^1r>bCgn>~!Iux)cga*Y>*r`t@F zs5$Z?7{zpvW2kL!H=mu35>J$2pEET(ah=mFZl8}8=GwGqJhVDf z<7PosI&-6GfwDG679;tpY}(qDH5I3f?ZW!y@x8=fwY~mp7xzfC?O4-NL$hs`q2nl( zM&vg}hjG`UcltQNe3rU%eAFj#N%@%>K))U0d)s=E4*A|tp|W?nmv_5&VrRF>U0D*$ zvaOd-yHvVef&kH`y|q19s723Quf9t#zN;=>P_+2ZsmZw+*qRKB`YHNN#8iTG*?Z}$ zW(`|&jfc?btB1M^T`ue3Vm-^{BFJN@4fjy*$UM`lFv}%VS>SYC?zW;V{ZMm(d$MzW zitka``*ht#@5DS)lu{sf^0?W$el|^yfuMSow}$JLFK1*~yAd>|Wb4iW4mfzr#j_i= zL6YDa1#yo#ErW0<5)Xa=p zoMv=_r^9^vYMown3Vf4g3Ve?$;Q9qWF9L)o+sl_+JTInn7f#y*-6X4y?mMi*E0;jY za1U)UaV5w3s=?NA$sUBDyIu_2y83e{T3FJRDhthl`XY6X(=}74lCa4?Rl4MfnLoB# zMFK~iE}C@8VsNNRn4KG)th5g`Rq7|}yu42{1Ww7phTZT_WYk}SY@ppBUDndt)74p+ z{+Zy`j0ISRw}hCLD^G%z@A+xfcGDcr34^Qr;79>H?l&-5O*0wj0?S)C-B8X+u|oPpTsUc5uI0Lby`jS!^O@&( z0#A7BmEz`q&uToVGSD4o*da2_5QPcSj>~yweSY0s#CrJ)!M4V8AXuzF4)$ckRGU+~ z5f;18>AKSa!3u|9r$cWee8*)t2BFOb1@2irjHq-CiI#&KL))3#>AJ zTX)V0$ZKryZ~p?cr2zeL{2geB?jFLX{|`FX!S`RlrPj(prrT?qIDwD`u3E7CW*m4_ zwwsJTcR9ZYd$7jsxrVtv?qS5J4rcZ4y3``d0J6-$9Gqjal=PI?$4$6A5l021t_-UDLYL3|UwYiA{YYV{EP4CR^1m>3t*Hx@KFIO<28}t$;?ZKF!Z+`63i};g3~a@t0`V;9k$4&^CfXUf58Ot}1|43`zDXM}?2VBl0Ne?Y z2(1u}QL6@U87AEuezI`&4mda5)8X`n7|FYsdUIs7;^s&B!IofMW--2Myo)DMhx;xH zD;cY}EF@N#5z-rnJ3sh~GUKin?*#tumhXO4yDqU~S8l7!_Vb^zYvo7GN{Z9C_{|+j z@a3@uN!G9r%4_);#L%7RYxvAUDGV)FaE`2eX~PzlzPnvSu_AyVEuaKIo^~avfeT9$ z>guGBajjMOP2bZuz7ZEQpX1(E>zFL*8U$5{|zb}2sol6U&y=+Ze zoZ4ycD!KNo5JNX29NMO5s9Bc>NkOlqmT+eB^G;5pKbI$IY?rKp zSWyY%2`xNtHf^4%}Zmc*a=OF@{2tF zI)xX|YHLT0Qtvz`qy35>d)ZJ?EAX$y;pqL>T4^ajSn6d0NDzp;fT-~QDlWGiPj`w$ z(c7-TYXI!ihc=7n=;PRbmLnjBR~026z%({A z|H(8e)1L^S1{9wHf`l=dM>JdnF?QCPWXFdv0KUb@ zh0$_at63c8q-fveCN8c{(b28Y@J@bTOypn?R$@*|&$qii zpblnfVT8rMCFEh^0mcLQL8eZU0|Q~l%WKF6MOs6Bgt$YU$k?8Wm?r)nh2lNu8N(aW zc!N=Ww^2o9$hIZVYt@J8Q#yQEVgD+U$ckeWcd+#?8UHA+7JHV5-o-O$<&J^v2NxLo zF4UO2AJsJ76x`VZB_1$Va65T;|7;NoZ$959stt+5MYfsu+j^xB(sUOlvKq#kY9JWCw} z(kSH6n1EWRB$%+U75hSMuv%!A6aU8XbL?Y1d*_=p2eUOeT?cip5sA8JrKur6S*U54 zedvqv#7|jdZA*|x7xJba=ertBR`?fU9QVnIwv6ySuX4{FN=rCxXA{NPez<1zBc<3| zU%L4N8g@+fv4x*LX?+g0=E@Ar>$;T-PDx)7A&Mb|#UrySPca}dMLyOz65~o7MC@g$ z^Y?pse(E{MEssk}U1%x==+^Nu5N;eY)UFA?D+{L$WkP|T)u**`A za0e4>IYVXKn_<~*zW_n+o#HAQOKb=E>zhb7-ID5(B-5Fsh2Hck3W5xf#l65WG%qhR zT`Mz`t=qS!;#>MFNK^CBQscW1Ss4C1L_0zL$MKBxT@hF&y+X{&+97pT!L~e-4 z_A8vK;SPbp3t0B%T56dkWi!q|VQqxDJV>Y=BBY`F6)EwIabnAv!=g z1XGYlI!@GhV9-=e(pt$&z+Z^8cF!bA?e__u=wm)Ge(X&DpXIny*1K054YmyKqba% 
zU}2cjU6Ws^=*{f=-V2*wiB06ocj9kVncb_@7{n{J*?oCs-7f7z31!4LAKP04tnu0* zm)V`AjOgs<*4*B5n3>T=0J!h7_Ot~ad*_6j8~0B&wTi zDF8t7h$tJTtRVUd0%5a5Pd?0d(Q!hn;yLg^V+BHC4U>*<*>i4tvjron+!BzGa zOoE<7{h_c^Yp74*`>c_LxjS08HoS>-{$VTRi}+v9ElUfjY{q2vvS0k2bn8o=gc6~|Mm?dR@*Yb=**dsAW7n`m6q9=`{A%2*AD}Ssh;C22 zex_5IF{IUpC)|BUglO^?VoAl9ROKm?*~BLB!rIi(<~#P9zIOE!bsb)i>cU0ScDb>u zTSodyf7Q~2s^M+ZSxM+jHjTtP0#?;=lB@WgIM^h_gSG*^n2{wmRqQ;!4G8B^q~UB= ztyWfyra82&Y%k{^HxmErva0U!oQ2U|cI{^A40a&v!8rvDmG0*NVPx^3Swf?N(Bl3& z%=P%x(w2;b4h?pTOFlkETS+Zo&PwY}91rpn4zW@2e&MSs;^%#(ph4AKVE*_D>=2JQ!KB{hwqByAiQ zvw<`4Y}`-otW%>wen<7(#?J3qjy@kps>_-5r`$D?4HKoJINiSuyAOTo7Z8`#4y`(lbwu(U4Yc7`_N)q3zIki}n+T;p}UHMwj>3L(UP>po0So#pUEUX2rx9 zFl=Q+-Nf2p5z2%=3?qokN$m6e^KnYSlLtoGrVMf~u!A=#t|N-mMhyWS4j>;@n7HVv;yM@V#*LcyqP9Hr-tPFI13~xe+rn zG$hyQs~R|FC6F4CRiKzaj!mMh?D9@@3O!XYbx|%urSjR2pIynlgHYu7Q1T z=^D?f!QX}mHit9X4|Gflfm^F&%nRf+GogTyg`R}VGjNaaOZsELqI_O{~EH8OWsI-s_5x#|1H zgP=@W3%7~N_YqsKRSR877CTkU3YV8*3eQjd!jo?}75WEpK4F2AE_+1;ntvfmi(g&U zh-KEzfMgLK8h3c>FjB5K&)k0@l)H}C?RlGStO|a=NX^de%LR#=ODeM~{NZ(FraU8B z$$R;>kr*!PrI&d2qLVGtCuEMZt|GkufxA^w9gjMY_L8bgAD}m;JNOp@%P581wEd_& zON8%YQtZ@H#X(GC)R&oSEsG_?3kHJg-rU7qVmqtc~5 zDYw=Kd-G}jX$or>Kd!fZb-8m5rb*B$`75fO`tx_B0^PFn(__X%e6V%%uEK@L6IvIG;r+jX}SqNXxjv_Jt3>@u^!ujJ*$tVwVnF%OvCpA`;kOq-O{Rb{WF1*YU><=p9m!?PM(YXr8& zKHoq)Usbzg6m(U#7gSP=jY>nOJLKhP6SUqq`zNxKA>_Wl z5T?{srPneN63?ZX4KpO|zJ>N9%lc;p_X~19vfxdmiMImWB06?9nNXHNKCIFTjm zdmpl|-c>JQ@fbz< zkuf73cKcB<56!Y+#r>s64u_Jm7^H^Y_N_?RIQwOu5o&2k)XK1&?LjSi?{1W$k&2QS zhsB^3bbsx1Sx?5rGM3Ra0^VAc!J{lq{7d77G3B3zF+ax$rZ&*{3BU)5Rea0GO5i}k zRCQcrH{w2c(CfKrnDAN;{1{cx^#i_!+41{SMkd=mr{n3pxGi=5*Tnf+CBr92llT!{ zP~+A3z8;8$p1Q!=K>9CdgxhCvq=U?;PUFBg5hf0+V6XwC) zs4%parHgx`M_WiMysnkDRNRC_^LN$y9%JFd*bL$k9 zAUikNkkYm`R*E3S1@V`{@ej-Snvj|&{n$0AX8W%%44EO@ioF!ofBL>pP-w(>!ks7- zC!_R3nOIsj{Iu}XwVSJXqYDbWZd0apuJCZA?%AHt4m9FOVC^Th4h@Y)9^NbjiUlan zDoP{bYg1`#=%$Ca5%A4(EN4<;+6KN3wqf%SGS2R#N^q+-%{M<^Y z72Xw`^jnB?C5>3%^9ZTwiX;?qrg88V3iFpD0j;nN-kh!JPH!)Ep8sFBF#(ny8||cf z;Rm$?T3B-WV%0JH z$|}8THH1m@TrfBxW7(fK7x1qHCvE;EX!wL0!4R?ra85Zq4O3(<^MJXItS8g}yl0s(JiMk8evMO89%{2iNH+j~Vavwi&^;TuIV3gYr@2k?&su^%P(PVIqfd*1DqJJ%{2^KxOJU}%zK`c5UI$!f3pgGqGtl`&mpQ!8m$4nm(Om_akS|ti86b4`it5EhLL5Qc%$ZA>b-v{LXccb-P9`F% zp9{Y{0NC^e9SIH($V5vOb!FX37$YWZl49rRmQ$>2&^HgqjECFoPS}ySwe%dTK6HWY zVTORN_p#e3+zUGdI2H~OUXDZ426x`*X8LVJ5w^Bp$D?+#oYn%o4j7J34L305qvWz7 zF*lN=i3SKTsuY^}56ioH|0`~$ng7_CSxIwo!Q^rF-Z$SQ<#qaJG%2VOML zx@mU#T}ZKi4|vt7usKYdck$s@qhZDHm%yEQ-zENT=z(v+e)%(C(#WGn)j|tU3D|*j z>&96A67ikZ{?+6um@jw0O4*gcvX2)Cblr!Q*GXTN%VC_1%D6LrIAQtrDtjbCPPI1>Ks7u;DiYX7^<~O0Jw7SED{ocnB&)byp)6>9J4#_W z9C~8D*q$&J*M};{3b={cN}o*DU7Zn}nOoy#y^;B#MRyPw8Zd>Wac*Rto*8r5cr;Hp?N~C{~y-K!%iK5BdYS$hqq@rX={Z1EH697(+BCVBZ*KFcuY4m2d+~7t~sS8*|RtZMq zkSw^0qiMo!E2Lz%@Zl{yBmr_z@QCS16MwwMs3o<>MV~nb*}pgj{l@sICNr3i{DaA3 zN|IG#Q#9u*5~^VL(A=>9uJrz-L6+g5!92*%x?L#xO`=VBYq>1ik>U~a@#r_C%jN_> z)A*+3H0%=Uhnw?w%;5wM)I8q-VXp{>G_@kLN3x>5}PrrVHuwu>uW|`V# zX(=fA7QscY2X}cEmkRw>Sf;cd64%%$b98@0kuVly7V*hX4%^N$pMP=R?1ypFPyU0IvFTj_mkFR@0F9@r2PAB!*;F@P|tEElaXIKoBBZ#dC zKVtu)17^P2eb0e_M??jazxG-~^!e(W!^VWI^ zMVyIxOpcKC;?if?XxB6QJP{pzrz}|@LPEhj33;cmFT8k;tYNKTECof9N-!>k&Z5jP z2U0e{ZHV^PGH5NpLQ1#GW{^ykq$ZuboYt^{*Us^Tf$&=*#$fWkQf>KGF??7AK5@r_ zaCHF$i9Sj;NO)3I(v)KJY^Bc%a4BhBInm{s(mz3hFyN3>HJO~VdDtHpAB`GzqE9@& zHMIs{=5p^gAdiMvB|Fu!R7LN*8QwSsyVBCepnH34NjCm5x=wJ2bo(^XPTL=oMGB`< zO4;^1fWFk;f^`EO)4uq$d47i2o7}+gB+be|YoFRSxR3m|2EBp~(E^(}qTpyvOe+L}jWd~cE%eL*FXi-`!#MTo5DTy_2qj-Rdm zaEQVeKw%aX4Uq;>J&g6gpS|Nizx0`liCW3@jGm&FI%Ma%R(^&t6X;}4%y&9cPqal4 z&)Edq*j>u$ykpwm#;FazS-} 
ztP|^kRiqKKKPNCbj7M*V=;x-aSsWCC!2xe|z@4aGDQ>Y<@Md=(B)_b6r<}zWIf{Ba zVz}vn!=kYouQLYHRIqH*c`yL(BJ}$v2<`Z`P6lJdh>Sl$;EkNfED436}1~*^uZ74QFe0qal0k8BFq&p^?a-Y0nch~~Dwo>c~ z-j(|k+C`GiA)bCWWjk;|L^h0<4J@0YdLnJb`1T=DFCeHe_NGd}AkI` z!t;qaXUa?f1h`!9S_szs>5nWgQ&>U%!NAvVqk3ZgXHZa0T14qdDP!cDaHAx(-yhVZ z46fWv%RUcz*nH;{H6^zSVy&~pA3BF_t$fPJvR|Qpq-(ZE<(+)M1?datt5i}Hb(&%q zm^yP5T8cm^!f7!mxG+;_LtfL%f)5irPM~jb-Wvs_vKl7SezyNMa2dgToJ;; z=il;;K|yMRJ_cIVg9`R*g!StYk1tAhUi<@zE#6&31o|2l5TAZoe9_AEE%H}Qb6gLc zicvO4@dU1ppxG>~UP1b0WaB5NvckFBQ%jF_S?Q;xiJnJx zNF3I0ScHpJJe`ji8Ee(QivsezIhT{=CuqJe`s@}9*HLax;oc)4wI{EsxN59V*##x* zOU?d;$j*vJxp&N;wctPtOm*S|hS3^`kOy=!-AZ;?8B0WLq;|Z%eIZU&VPS;&@Xcu> z4NKIP1~xnpSg9qNjPAdb+>81KOdZXm%@H8-y{OM3yPRgnQdU<8%t`IPx%W(*IevMU zF)aXZ1ZXK{6#-Mt>&_8s7^qtg0I`g3lBfnoW@E%52L{V>LdrvCYZrq5CmH!Sat zaU&n}lkEJ1Pc&U;M{+~QpA3I-<=xP-HbNmCI?y`wb~!Y%g%A9ohFLJa5j_~H>RVbD zp~dd%$u2R3$DpELMzL#nfuDWR%RO1LfW~lscJ@p~f}t|XGSQ=~xSqM=WoCa&_WxY8 z`IT$#0v;W;q$QEn=rafWOOPm6_@(|`<}H3__1gJ0k-y!FAOg2^vA@2U!acu)+bNFz zAHeCU3Ync@k;B{>(0rij$wz;|0qP(Cq~RI8o%+Ufoj+?2sm;ccU1ATDhm2c zl}d{SVCk)GucK36@1F6uE5rx&abeRw! z<2E)NV8_>6ceN+{M@DWkp?9-O_EESMr1ScK_8ME8?NVBun+ol_<7c(@xuC6dKg_Qo z%T(FY=R@6n<`}Dv&Y_-+VYJ1pWO@4*X0@i!V!UY3Z-(%0ipfNd;(M3suz=!K<|nMr z2^A*8w@Mph5F@cUOBv0}u$;G0FUgIaA}WZ_^*sTXb3`|mX;UQ$gTMXyQD@#$k_c*4 zGl;%JkgIJWu_KOC)uqbMcE}tV3Bl92(O>P&b6gjqJ@re) zG1(73ehtRJG{Wm!pRPvZcPd<>e8zyR;?PO#N{K9tk;pbFg~wkOyH5DQ4!O=En`EgN z$q_P3QN(h0gxbS-cZ#mq37RT&P6n#^Q^ zANWWH%M86p(p3WKB`s%~%gbU{dz|38NzpXFHU({;0|KIVA#jutp8!b2=eZwL3($sAFVvGOx=-pdQyK>=EqWykaQSeM=t1@=~nZL4) z$lW`k)DgW_+N;S!v(ehUEXVJ9-b;WCX8D5+A^MD!&X*CiS4-$$2umQiYl#WMX-=)hqi~lX32|22H(z8%o-pShE29b;% z`j=PjX}n;czDC-SwTBGl5OP*55()AtS-Qzpnw|;EAh0`)^6)Q&d(FM>kGIOj1lvje z(NmPs_ib;&91%gz4FWE}J!xoVWDKqe%hTC=-qv`{(~-|kPWdEmID?L@BfH9?DpI>f zF%^bWtgK4-Hj!qYk7aBP;-cn$v6%m};?KqQz8=`rv>Y!HB||++ybj4o^wVwft6$|F z)|tGpvOhF6TTUkWj+lV$St?-6t6VaL&qOZdxb6!3S+uQ!C@nJ<d<;_AdCiC0@BfQ#BgVh0XSX8(_MIqSJP&5QzdP#KT?3GjKYP`j(??N28 z*i8TA!A8Ighgtj<|KdcFLGz=QtC!~!jfo6|DSUAU4f-0c_5kHpK013Kowkw->2U}$ zIl(;H(XZb;Bvd<1nY@EECz^q-2&}R!m$0SZ*)6#k?-LJ(B|6!t3+`pwzccutYCYEn z+=#f*t=t6leZUcFn-vOk;MO~ozS9tam+e0Ng~)yAd-xKqJcmM*g8VDChibT$>z9@+ zd{6`ltrUzlTXp!1;5**8K>#?Ko256TJxAS&T4D=UvQZ^8{eJ)fLH@q`MZph5^hU8& zvdrPV%sMN3kYPv?5+MM|rM(1cGL)_*Jz_T)yxa>+%`yF5kUSH`PvxG1V-aWdMBf#A~iKG>jYxPGQr%XHq#h{ ziVA?|nRyn&1xjBv9SssxX6?j5%-&jyY7_x%Vy*82UNTp6?u~P76kD{rE!tbOyfDKl z(wH8sm{9D1*a?9Gkp9H7hh;x?F6;M^3RBVA<+~C!=ve*30N6eK_Tre z;GK~QH6GMuR3BtG%3?%`(2}Byr7^M{$jy98N0os}z2lvAbZ9(|OD`Pho5BU`%uj8G zS&0O$Q3wsDrUsFyU}Ynzh6}{0sF;}cqIisQl^C4`v`qg1cR>^?F;^+x2dziZ7akdN z`Qi>watjeKi%b6iW>?sLAzTRr0SfOfS3X{Y$qwu!AtB%}Jp`esTv-U?pqBbpY@51o zBw>`Gnebb^#HbsH)w{s_cHCXXEP+pwmKS zwUXgGvg_>G-NlLQ%eW!QO+Ss9YsLxBe{aM8>;fzuz= z%Qjv=j)?H9zJ-L6o@bIB=vBFX^Axk0L%Gb3(?P`b>!7oU=~c(ZJ}l3fp3H7~oh$a1 z?G3q`!YN{LHv_p%_iQzhn79~}L zhd`c`IHgkQ9B-oyOEJV&RIs?evPz+*3BnN^u~@9iiptv(;E?r(?<<%|<|2ueO~n2i zmUqw}u82rH9Gxj%wa{Se6bcYnV=cwey%1=EF_3yA(3=nNRT=*Pkw1|i{Cx@&sWV>l z8cV)>%g|wa$IaR*`y@ycyaNY=QPHkkL@Yd0(3xT&Z&VhcxoA1Vp2jX;E+-P(%(Jw# znl$vPi@J?iA`#=wHXoh9jF?nBBxJ-F2;fW^a%B@sZ@dl;PY+AQ8pKcrpp|3;G-Ddi z`!epif2rNLaV!_4J7p+rg;fTCQ1f&Z&hr98j=7CFjFp*G8EDj|c#?Au@+5wwlFm#w zjP5TSJul2kyMhod1~{+pElj~eDXUO$l@pQ>f#jDHX(HpkqYT3{HVKY37|sb%K>`~k zI!NCVfS)=ha1h3~P$@1(4lxil0l3>V#};Vu4Y4luE42GZ!HI z$Ni3G&(AyYO^E8FN-N@E=phE!vYTM8o4zp~$u%e9oF^!EGqo97Au$5d=_tE^II58K z<%m=koJTZYmKhb%jyBLP*p`>x27YKhi|;Af(4)k55}}0!%d}3GvaLAiX(0w}E(@%V zstu{ggIBeAN#}O<&XW@{{XK)_owz_ z94rKlqU`2U==auCM_obzeWB&yxvo}98dEm|?dYE1q z3dD{&d&_n6LG<46HXrngcD7s@rUeh?R)ZKPj*)nyP+cIU>L?{=aEka5((ey4o6Qkw 
zifxA1v!qeWiIW7QW2tN4`I$x+7_2uFnXgMaAn4(^(~39VQl+q#yCLiM;wDH99wQsg zxQ6I&0h+#@#un1qPGi{)ViR*Qlz@Q41jvjDvjhZESeRiBKtX3X=(s?#ha%`5$>&BY zwD}^=1Kw#H))$YKQI@EY9-97Gto4t?sM)CVQ0B}sBH{$KK~|=Ijo~LLzm}QKh_eq7 zqA9ZB?(Z?}$;v_@^Qlf25Kx_^k!Zw5pzTK8F|GyslPH^ND}ka4_eO;=aP_Rrvk|^A zj1PFOrQ3onL)aTcE_p5J7X_wya6lM^;WjZ@fee^~2s6}J5zI>LGjd}lxTTEoqb6RI zxVSS22Bl&NkAjcOAFTw?F^=h|Q;o3OrpH6sG{eN!r)VJEbYWfzxG3R6k7P*N0#G+G znEIX3GKa)A~h+Z zB}U^47WaeqOkzUH%gnK2^9Tr1GOhusW>O5Zzdm8iIzp{VRk^_Q7f$k+Er@lt?VO8F zF1f!99+Bhe7%6#U(Q>|^cz`wk0J9Np4u&jXKuaw`jiL_D^pyKVcmgnjL*@|YfI*DT z4NzQD)l5WC@2{__X;TVue(-gAAHMKd7l}_@s za|@vK2A~SFZ9?oZ>+=^w#BDc6I6jf(4J}yu%}$^XCLO4f3pFeb)C^Mt5f~kmveIOO zE?+{_G$3AMO5}SaU}kzG^ih83uIRxkStJuG3`}5_{_V5)9wj?YR4){l>G+%~DXtJ35a0^B-6s6aJiN0ZZ=Gyh1N~@7?|_HmQ)--TqsNQ2V@)EQ6`XS#SD3J zm1NqYY@!)2iZda5ME?LzKc9wMw7*H6Sj!VcN&0vL89;l)e)XD@*7wX3+6D9(BT?5X@8=1(+yfZ820$@__ zEm`T=97inHz*S0@FcB+ruqNtP2xeAHN z(t6^Va^ntL>D!4;sIhL~jdYJqyli5wE8(6AeN?|_9C~2|!*n2T{{Y#J%l`nJ(oyNrWWi=T>Rk|K1c6F53rOY+LLiSpa|F%(8m2kT`Yelx^{QenOW=uu%;s7w zS(@g0D6%T(qV5xz%J`AUF&i|-c_ZH^G0IsKf9*5)-U&=vAySpFx~?{2CJ>@+hb)v6 zbVj&AfaWYG1Gz$FnMXn;>k$IbBGeCPl(UY9SU|&bh`A%A*E{134!15{nB%99P)CVE zE_w&5{6&Hhxgvm-rFc9=Y^WX+!X+F-2wEWmb2BsFO3GfK9wtp741fAZ^1KrExY>*+G7b*&vN*K~wGtsoeDCh>0T8?fA03-<8FScGG1`NY8 zMkeGKIzmS?9ZYhIE)inIiL7_kY>QH?DwEn&aq>PTDyj{1fZt3}h)au8eGFB>6R{0+ zR>LWe2=0GFybt8)nIR;6J!t@hSSni zwo@?~aFvMJQCEm#8_J3AKZNEg{@f7~6_xr1V}`o=q0wrinNb$tvxcDDs9-vX(p4pH zB@qV@a8^NpdppIdUNFFM1u)I{FhPiQS<(Lh2(=n=>QyNOIS!+#uSm*bcW;6oGs=p% zX-ayX6`6!(Lbf2~jdGD?v1VmJ!d_)22s3em7xr@*{{T)URgq@qIGQdcq--8ij)bhh z1UD4yltHKx(yBJWnsj9*q6MHT91J?H(3&*;(^G z7)hxav`}Tt`1DNYMZT|5d^1`G;!)Du;1Z|wJiP)hhBtEZgBpdxATUH-%15HZlO4^W zDy2<2Nu#t)Wh^t${{Sa3{C?0$WT;|XH98M`bj!GOZI>}?FsOzeheL>|)Ug|AJGiyF z&qKBixr{0mXjmr4O%UmhtaUCdT7xWLguulO_M*!v+K}HVZNb3w)j6EfSUMSl(>cH; zEs?z9)*v1D2t(3T4TRU==5UrZ5l;zSuQgKY}~5^15ggyM1@PlZWqyq zQMk8)k#1@WViDAFmnsS%7|V#o{G7voE{F>?%q-CxIDoD$Dq%9aBJ>bhVyi>3NR6=LO0lD_Fn`jCw4ESc2WILq1F z&`ttkS9i;o9Lt7X#s2`?6N`XZZt-o7W2j==P#r{=$(St*7#P3?Aq9wAylE{`?vVkR zS1<(T3{M0#(4Ap%+cm^0r}sO>wh;9Rd1u;l2ef8^jo$W^wF=GrY4{AA?0Zk61SJC) z6&!fpx$iLV%`4Z;mcN$6k$uwt0NH2$qAXX>PFKo(Q#IfK@IezdwFTBoKix;4tQuaz2_!EYF#7nF9 zm$^}sZl74Ih`Ch$6PQDltazHflt1K8`RGnQ8K(H5;8(B2;fdpXPYK&Z$OYu(= z!#ok}pAqd4-9$WPc1AvedP(UgqIe+|ds|ck7wZYBvS5`m;w$L5vFL7XHi?(IBx3m@ zIO-+aGsB)F@M9o%82tTH{thht6MOJQAFc-QbR`--eF&82+!4O{X}R;5#z(xuz)uwI z3p72k5OLoz2VYsQkZBVytkCD zhZ4H;(d$g7pfH@AK<_;0P=AbHDCM|X5}9-<)9UGE!e)<4%P-z~=OPkDv;NF?AO3Lh zo+x(V;S^3aIn;90&uO01J*IC^zS7cLEr&#U4ubyxiPGv4+?arts3(|Td4J$$Cr6^7 z${U7pES)zpi0n5lJ$*j7`E~H=47YxtRD8Ni!mY$PjnI>Mabzo}`EFU7e`NHd;GUvV zz6deyiAUkEtboc%VPenSZ)LH%cCDW?x9ZlCTF{ucj$<`x#h&j^3Nn96=nd;4#;$& zuAz)dBTw)yEbt?iV-s&NBcgcG?Prgm@JF%HFT`X;^tUB#vYPt|mu!Rk1bg$#DCLiJ znd8emy(@QTmU%j7xel3sCV3}nZG^vQuFzeh3-+FfkFFnz>TEOf>*iSkMlX6db$(aepsl#x|&56rIyi# z9MS}CtbpDs?#1mZ!D@iriup`#QDB-C@{~CCpmoYQ!1G&~W;Kh3y9bU@G(D>3JaDO} zOpT7n*=9{?X_>&CV42$@Ko$$3=5DIN;I}M2rF~Th%b}JT?+a28uX%5C0C{&EAuxIE z_z{Pw%L9XoeDAm*U=%{7!TnRg&m47oBE995rMYp7Bsc zDM)M!pyl`syb1R^eDAfj6EAd@f%)767b)E{@Svu&Bjt z9jLE+aYp=187ro;kT!UWX2sQ&0BY*#CRpixS@bisJi1Ta%a>!G^9@SH3*y@D^qPkA zqs4UMR|Fly*bWpcUSrnC)CuITcAlG*guc!F^)5O+9v}06kw0>u*}fbl*pJR5kgF6e^r!%8!%Qe5 zHrId-Re`g27wT8A_1sX{$Fqt)rS-Y0RinKPCK`39r*eR6C{~X>Dy#^gX83DjT$1P2 zepu`J@dq70=k|wFv&{pLSE3=E$OTL)`>q;65tHxq< z99{(kWkv--Otr(NBlOvP z_AhyyAD%&lx>&h?b~}2>c#LOfq^Da$23wYpn za_C@_b2XOny735=b`il@c@P;l=7B5lfr)+v@7cHbJ?Genc&wfPN{WQbn=PAD z0!#Bgr}E3!`>Bhje>b!OBe!8#Md!S@$3$!IKsy1;34+R>Mf;YiRBtLjY^!J4_nTW7d^D2t>STi7&7ls=VSVQSGwUKDm3&*3Cul;Gb*H zqrittHbt=C5!-*PO)A0!7`5(&j1J{=GdV8dnUW7MB}}HxSk3 
zMFuwLryU(#+Ob6o<>%fu<OU99@FFd`V`v4KZ%hlEk)>`s5>t3g%{Oe1KJpG-%v^2Ud1WeVg2sU6-AgKX3&%p7dB@Cj42Wh}y`tRD84tZGhxt?lN%32Lfx>d`+hc6w4Y z82Q-uh7SER?&7`^q?B)R7scus+krqVvY&_stKr3(e1{Uk0n_TxqG7$UEiPm10KIF- zh4@9b)5^R~F*>E?MgIUx0Ydg(;Q~X6*?Uk_-D{Su8+}6kO~$s;P)(D=!6@a-RTnPD zwDDoszGWa7URC?)2p?8e!HD+?gg9_%3m%;l>ASA`xV^;{JUr=L6@{+iU)&;{)<90> zeu!h~H@yC$NL6)xJzQ*++~Y2JR0o#GHO+&(QO0_J83w!Cwkur-PyyBoc#zdRa!Ox} zK&u*B`46}t$T!I@{Q%&>4-L5mkw(K@#Zs{ZEGRs7h1y?vPto)k7M$+u8-qS^$OcbT z`w=NxAt(u1A`C9ds9{{&hzjY_J|C7p@8TMQ@hA2BLZjPum9cs61n~Y~v>#B;PLv}a zq#5wHS^#t>=>lbj&@C+Upgl9sb`KoGPzd8M` z=sBcq0?&yIEFtl-D=l#=59C=x0{xM1EjANYU4gxrB3R;7G4htyPcFMbqyTJ2yHzj4 zyfx}Xo{t_;S)0+3kRMh|^R!_!@cgs;{vjwsGL*whg=%BY))f^B5u>~@h&By(fg#Fj z1+cKdmw9>jj75^w5taNhdd~nM++HFO;P^b2U$k}p#=?aFziCt4Dej{#*FB)7u}#tH z*RP|FgOT|J?~=>bO87teMR`!ek<0iE9~jQ#49iV~bqnybq> zW&Y5JvpD%2Lw5Rc8cc|J?+8sHJsLFtt5)}baLoCwydNlW0MmJ}eox#oC|{s(*?rcr zsZp^}E?^ykP)k+BxO*nnWvNVQy;fBa3!}76g%qSd&5u@Ra3mKNC8x|ScUDf7D8FbV z@AC&34~R`9$;=mdtSR#~CST(c-}Mm&Cd`BWz={|x9ywv}sHJ+l(l=f+tXP)uq z+1R3*g;r%CTDM^aQD_?CUB4xYnHJ8|Kcir{v|D^k@s6)#E+&46?FnypAT{uD9jD@; zdp~KwMnkq(9+4Q0@LXCgzB)heZzBCBcIFmiK8y0p*a$0}1^)n0IJr#>y5pWAM2OIk zv8`HWBv1|PF0te{PmR@11FP#B$wb04kXRl_QNSp26^3(=HnNje;~6=Fa1f+EjBu(? z8Zrv&=``P2lOIUrRed7hXNRLoupJq8E}8N7mgbJ~p9^V-W?}NiiidL=rOQ)j{16qT z!5U*rsp8KTRF-|NVvOa1%`QzheUlsY#I9X<`P{H}<>2C(K3wx0r9I&+HeOcynkL;_k2QW) zA?Y<};s-Nf;86g?q9L<5g48zE!>TEv9k9O;RmsLllV-SKzL9oZwb5WL0Ns&If#aBN z3`%el3(dY_mt(}VKM}G&5sSaf6m+QO9PVHExi>T37v3&WqY47-AS+IpWE#nbhMj2I(2+!(wcPt0MnWs1M$te+VqdT z(%TQp8=LhSW~f8{1mmF{{{Vyj4&xd3=KlcU$M_AyT@BuoSa46VG;|ujz&(DK^3VU; z01N{G00IC50000G*WHc(07x8RkS5&gqUFcE;m+eiCzalGpcik|Pf6I<_*;%=daDuC z`;aLW<(SYw{VtNcn68l9{1+@^e)i9J9|$CfiH492JaQ;Bl0gJIKn{RG-RN`sf7{Ep z{o$esCQuOwC60U!MgWo{8-fVlv7eF;!b&aZz4^GDf=dvBPC5}4B$5FniODh21#G@n zbV*^btvO3_79;}{0D%W!5`ZLtKqUbTAdn9?G9q67B`4ee07o^Zl)%LQ0N8@i+X5E@ z_H&D|vKH^fu3!@HBURG&uS2^udvqHcyt)W6Las$f^Iw0l9U}l#tM(?3iNmB$8(*51K$8!59_*1fjVAo(P8PiCL#HAg=Fa z?#Kj#$R*!4A`nR;fHC3{c?6FMn=;)CSkLg!hBQ)jCDI7ThCtks2?QK70E}?l&y(4BvbM$%4d02$d*K+Y%S$gh}v$nwY2NwN&pR-&8)YTAh5+{5K2N^(KX8r zVdx}DrAmwsSgnd;6knd#>|iuo$~!Q$}>2>cot{TICt`Okl`;1878>E`Tnz!=+`?d|3WXmJ&jC$cZ-~ zB5pmFhuOHKIAq%Nlq+w{tzjoCxLFadQb*Yl3fMA_JU z4P&9}tAQD=`$qcZk)Eu03;j6%0OB><*a^{!#ZWwp!0(+sR{nF5=q} zhUA>zz<2wQyj25XQ}oUnpPI+TGrpN5^liyFf?O0_M$I$rlLTxbhZN@~H8XSH{9>xx zd8hRQ&f0%%*=#-MU}DJ^G|4h1V9UWr+dqXpNi??i{9*m~`MyV1iGB+s zpZ@?sJO)>`Wl;==Tkh7$ZHw$^K5AFqTk2*Es0qhAYGkWtT|o+&@}hlgl1nOISk0Q1 zy_RkU@FuDKZK&z87=TUBqaFO8TmW)dxYk)EloCTJ3#rA75M5a=J+~wgD|ldu@Fr={nS{{m}#fGkgC4 zRFKg7?d|h_r-xAOu(INt8~*^l-;n_%d`TgB_WE@s(sk)AQAN5W*OJ;imbKoWZ~p*f zSuM87W}o*WSeOVTY53gkzjfz*H0%!?KY>LLrg{K2q|nCv$2H$Z+NX*()~_?zq*Xg- zw?#gPJbcN}<=ifI8!!Suz~XoSkWUzz7bFw-caTBclgZ>@0UVM!z2VP<5(pQDK_F9e zEtx@PhDD%mJ|K`W$be5Yz;s7KP0Skgd!~$^?0k~`{ZhHi8-e%T!*$hW8m&X|uBU`Bw^^*Ya zc^WRvgpwOXsiPR=#PrKprVrtVrK8WJxmlig*|b6T85dGwvrPNg35I5$x5H1rUA~DY z^h+&?((@Bqs*@;F&whgftf>CZt)|`lwteX*Y93#G@M&+EZLj~t045Ou0RaF500IL5 z0s{d70RR9201*%&F(5%OQDJeRK!K6L;qd?300;pC0RcY{5J@D#2`sWzgf`?GnE-BD zU@Se}`FP}nH`HFFCb~o3p?GlLRu7T-_@2r`FETIC;EtmGa2*m|w4L9X2HWfJJcfA* z=)aBc7l28dEr}VSe>>VQ8GjqfPozceUsn$1N&13$E2AF|carJ0&lrMvjsp{bOv^6w z0O-BhY-U4xSP(dqwjdBC(l0S#8Hq9Fixw;{(HBA}!ZrtpVBW|EWZ>i`1c&PFatol1 z?11Q&^uChB4kUj?h4s+579p`OtFii*^>#u5G9HZHJi1OQw%+EZ` z%#h?U+YgM0^X2CCj!pxz7-7eRcAjwg9KIhU@8^TKca8fa&yhYsjkrPfF^C+V?=c=- zt=pn|OGrDGS!J2yt8H<|RJ?q>uW0z2gx60V5Z!zZUk}QV2akpgVSxDHu;GVLSw|UR zKIhbNg`64eB`mVf4Yu3r;7NqjkT~St%XUrphp3tk7fAdETf_3=zxSYMv^oO@zC z(#tH98>VHo!{4e7-tcRXKcIt#2?2$4#IO!|szNrWGbct*AYxNs$ 
zU*lrvw3CNLw8=im{a0_3`t9aOnUk*wbJ=g}KS>4kKd#T#Gxa~r{yK5R)t|CU!ZG!9Zb{yVk4QQTUqoZ;f_L;o`-4Uu&H??{xJtmW zF0QVwuB0*;_Bk(CspylPPghv&1(0G-sblM%&PP5A8J9U6=N@)!o^#ccuK6dtEOvVS zE&Akhm%{gz-WBN}Ez3RHv9Yn59v-gD`Dt&!hXR)@`rVd%$%oIY9OfJjI}0WnNCOCF zIeuaAd45|i?(iEJ`5t_*6`)Kv8nb42`duS>ojZ>I02n`u$C7NpYGt;84RXx+>X$+B z4e|?Q`wUA`-4V}lS%>kQ1JIL?so~L_0{JhmiF%LGc6dC6nV#G?&h5(M(>}mN6ErohX-kAb78Q4tRT-XZlr;nxx_vm^YP&r z_RouE$Ld7!1Q4+Lu_Us(t*pIH9awX<?UovsomXfVBq8(U6BwDa_zkBvsAi;?Cu!=InyRMm@;e_WtW||@B?9n+kSg*#LFy> zlFv4*Tb$B!pDs^;>o~P*gnT#P9pe;i8(r5)&m!f!PZ!i!g}Ypat=nx{wQAHd+=p{6 z{z`(DTPprv7y=egz&(}7bI1kLiY!d(+WHo^zD^C3Zy97ole7rfo5roPor1k>!ECY+ zwQ72CG{6=tSojTNY*@2mAhK-Brvy`AL4yUcX29|LLkHYoTMniBhB?!nxnq>m6r#szt{>-QP^ zv|IaM+kwIFZ=wLnd29 zvAS5ug2D&gn-+ND#8An|xD4C1UZq4HLm8#I2!wEmevzg_F}6tErS-ENJezFH@ujBg zIY&vDbM=c$#92=zkh1+&&|)B~2vQR=Cso*wY;LCt%h|y#yLRpV?SAb`4tmQlvBv^k z@6}~&oXjX27SDv6Hh4-k*Le%%xG}~ z0!%Y*-MM%X5k7D{Vd~p$hJE}d*|K?Q8S**xxjQ?BX=!cM_b;wM#_cgM+@-O!Jchit z?AkM;R3~MwcY@9xtj}km!rzO{M~Jp?UB+)>z$MbLbPJ?H5f34eFv?q=?K`&KHrs8w z=Xfrz$@dRd+ikX4ZwD?d(9C3*tyzyAO)6hfjAr2Rz$BL&nEVw2() z2tNpR5MWbmES+D?zj*8SIGo1xgH61Oe(kf7{c#~bgluif)Q zXYh=>;7s}Dk3KEklYU3PVRj~?c$)G9lC$`L6zOy!SWEWA`|cspdW6MIfI|bBeGt+K zw$VKKLSTe;kJ8&=8LEV?+mq*_6A@>K5ZhEnQrk74^vySKZa?H7g| zz0H<>67|LSF&s1a2<70ylL)b+;QW?Cza{1|yx}GcCq5%_qm}h`Lo1_;9O>>6?pJu2 zd|9%?@RzPCKQRGe@Y%K<*+Rj9Z*JSQKHcy%=EDxHM%zBF{{U0u{1W6Z)Fwpgaym!8 zI8F_3xr;)PcY2Qjo_;}(iF)QgF&{l6dg4ODTzD4AKG&3b;10Iy_}8+2369%2bL zqu(X5+~S*kgcbRikAoMfyLS9ISWL0TehqC;CcSLjfPr{-*1ZW1$EYlaqa)}b9gu9n z=&_z&s-}R)1vn+VgmhXu-EOD!m3_SY_D3nEbfg&4>co?1jg>qBWWS0Ex{A z!AS#Qk@y>;5Sx;lmIOUVySpoc#)rTBa`g`Q5TBmg#vh$OSK7uZ0VMIduelJ(%>|#p zAQ(tpVWc2HGbeWuJu)8mdA&}`%!1CnjCu3RF6?ai4fXLaP`W05dvE+O1Ki`Y?&{%t zg@&9weDZlFlO^EP~Yv9il9{{T2=m$n5XPOPk9k&Zv9 zZRZCW$CIlL6H92f;=zPzW-N`5&EegRmNs$KzaF#i4>R(@!o`jm{{RO{cJ#|aW)Zuw ziZcmqNtck@}PK4T_$VGRrKxyR~Y@#>U7= zZ-_bbX2QY(G6Dj^!cs@a_Ss2Jd|2Sk6yNq((8}yidiJS!752Fie@z2M>P(JeiX`7?xuv zNPD767GDxMXX4M{An%Fzh!V=D(w0k4#e{1^L{la+38nQokp5CK> zcyAdl<8Sh+z*T=xA&FNfz6QJ zo%UzhPS_syKNi(&25h6dCmVlQk~e6Rq^ElcNRwkNtGt(bF8iburcj(TL5y{eZ^H{oWH1n=SVxqXq5#Kz4dQ3TX4ja_F zlyPw1mLrhsjTb@7@f>7!*$=`1vFR^gA2b62^*Qy(X7DKKAFm7iq0W=Lzs!4c;zpL7 z_>3@$dxKnwYTJ6Bb|E49a4Fd@yPWmFyi2{m2j23NJcE8p^*?MhIr`(SM(q*ca7wG| zfY%6W^qDX3mN69zk3SfXC*#pF*YXYI*@aD$FLI7_l3ftLSE)IYn_VQBPpKJlay{UI zp@&7FkCta}-ndT72MRB6!@{t_?WgPaJ%t*ej0a>d(3#89U>@)U3F>NJP(q^JK|UEQ z`469P5V<*wPO-7vj^(kQZZX*2bc-eRDe_XH56^^@r>G{{ZLc3O>F1&;6G)f78G8 zX0W|~zCYvjWnZWBF0Pr5Z}Gpv7t+b~0|MIj7c%N4hn6f_d%(%D&9X-%0|)jGjJI2V zNjdE|cHX0mx2zY8r>){Pr*rztddPi0@=xg?h3afMbPa`v#hhUeF4tK}OF}VeXxaEZ z+q}uQQ@QW^5;k}D{*M0uyXkPl&*$my&*%7^74N_Hul1Ibwf5^e%0OunU#FW{gN*IGIq?2uJc(ki@SWk8?rTY|e0sH^} zzyb##23c(Qmc{4e^J~xDzlq|<9{vw#;P8G8;(eblTu8q7dFAySyeGTH@V#Ds56q4W z>iV8fw9Nho<|CGV?|1*i03;Cs0RaF500RL500RL50RR9201*%&F(5%vVK6{(p@EUX z|Jncu0RjO5KM--!d%n322LkNX86A^=F`#BMW^qPphI4O0lEoVAiq|g9y@W2QKe%7J z_ebe7g8*>2WD*$-g6 zZ?Lr`(=FR;JDW6C{?@Q;UhnM*cdNdonzA}G-{n1Eym%S--nMMTyJ%TWeP42ja7Zx< zG9#`Av+v8w@_!|&^7+TmW!=Js7mHVgg9E+q%E9G!K5jM0pC{jo$7R{^9GfS|+jza9 z?i+kZ*wvfO_Y5&eFvTZew)on&r{(2%f7JWR(}S9rmyc_5TD&X`?!EcmxgN9Ur5R(# ziMu~1wi@KGc5~kXT3yeRZQ}Naxn1pASR9TP<7sITD1HKQ1fJh72)UnrC05Ar^tujC zn^gY*Vf&1}{u#rJZ@&YnZ{4KFj0D6D;$67;Ydd^F>LHPG1|gJkk9;+ZS0U@=h%S{X zuZr5N&H|OQ4}+l$jNkOv&O>C#>?|k4Io|-Mm-Lk4!Z8*W(d+VY3!G-b?Pb2>_m9?0 zKfC*1+6(UKcF*v$>o@j9dTqL{&xzYQ_Qh)VH?8VdNK5Q%?&ya8)}wIXe7%QPa7=-a zKHeRJy|eCTZFdUW({+0S_TNd5wS;|_J&gUdcHdF4`vXdMYR&Dt#TTKTn`sTFrpFsS z`d~XD^<+JG&_3_$Vs;Pgf4B6r=E(P*w46P2BgAI?$L)Tr-edUB?H{!M*L}7gK~CrI zzQe-k#^1Iq@SOtRVGHYiEvws4yZTDSW^?XmFm3+l>tu6pxRrgt+}H<9?^}Ot292TJ 
z{Vx9iXMc8&VEZ%rS~;-3*~d(t%rDu0vAs*3v;EnL)3!@5hQ+W1qp77%HtcQT_yH6$ z7D2o)#~@{1ue}+4bMGm?43LKMo6hnXkbBQ|%TIve?1mpRobQz0bmi=Izy+$oV-gEC zVMpViyTqatQ?{ku#0-2LamefQSbQPjG@n#YMs$~FC)Bs#e~8nor(7;hyZJu+4h~{o zqZ^vLToCwgyTo`vopa{@0KJ$58GmjWk+2zIwx`c2yB~_=&=~S~*z`+F&qmu{fywZA zK-M4Pjto1(hp>EnS#>|9(BXlw?(#Fqh|EF*V#y&Ht6Ur4)!nw#!MyK#V&&>P=Rg*i z<>MwrwbkPEw8fLo%!B^`H;$s{J|PkO*i6W>y1QGo@Oy_5_mPpD!7r&x$vOl#rQeA= z>!_w^nf)yd;R7Q$cmcNIx?iYwg(2aXNJ|J3q-7T&+qznoknhQI#0CNpw!lDS5SX!O ziGngic)~Sd{j`a-79|?n>1AW}e;Z3utw}3uX3dPs%e9%*KB4UZl4ArX=(?rh@!5OQ zyek{s-~=9tf4g71D=%Ssx~~4m@dV?g(>>B2jrAojbc4qYT5SHqB-<(dtvg_S%40G@ zf3rPF@v!do3%(8odoKd*cvX~%Y*;PWUsJ>z0fV=4Tie9b;9ufS7VQ${ZQHkQ-Me<> ziQAY~Oh1kj_>L}|f$lx(NA6vjdb7=dolka0^~Mu@&PwSxzM0*;=NV&T36O;ovKO4F z%X1R84;qncF8RgNA z2$zdt@05h`>+yzVmRVtCw~64c;kH=>i#$(ob#(yX#0eH`!4K{IukFo0REZ%bO;Q8y z1MdFU_P?e5I6tQ}t_Qp`(RjDkon~mUx>(u!k1WI>w6wIektt=CS?5R>1d+ef0{+IORHT!={`d`v@ zVBTBAC}7)LG8qwjC$pApF0VfA&3<<^I|ji0L&x2?%%#3PoGiLReP8##@56t#{{U_N z*?#Zoe{1_+($V{#6yS#dv044F+5Nxmf7<^5y^q`e_x7{OU(#m;EfCR$o?sEdE*ivH1b@bohL5da*6K zj9u@#J-e6R0Lf!^gSpH`GPh(;W-Ptk@0cGMD)W>+Lc{@C4yKNA< z@Qu*G8T7J&JIGVLhbl(&3&0W*B+@DWLqq{B^mNE`)iua0Q5i$zdA?p zX!0BC*laC_`D?YIWNb57z1SVhd&)!vFJ^U{sE=dIbgOn<-O01bvgZyQGR?YJ*zng6 zdO1dA++K`xAN!4$^?*d8Ngkj}zL%!{Pi2XXeudBQ@z<9_0uj}u-Dt$mw) z%)7hYllOsWvUoS!Ecln&L6QPYvdb*_fjq8szq@sSi6Fa))#A$_iJV*A@dW48k0x3Ej}nKwZTXSpdl{Y2e?G24c_bv0k?LO& zQmsp2$I!mw>1unyl38VtTQ=s{SP;gnvdcfwc-9^s;nxWSLtH(**hK8(TWz=DwzqeGA?Fi@e8~R*UYowo}r7NzPgF-=5p??*kC|!66Rk8INx=netD+h(z2BLyw8F$(4t( zep&v67JR+OpYZ+pJ-f%2sffEQGkX63DUgQ&C$3Fp>xM?Hcyh8o(TL&r`;Ym5c$q?2 zGRLWG**kak$WGt5{)^KY_lp~T?vDHyqkD=6 zYh5F6BNHjrV#~K~mzZm8L@Yn!`;G(1L78p0=dG=!r^jvOHg{w_ShER-I3IaBw>cxo zz9D_cz1<$W)tS#gww1>-;pAam3^zN#M(ExBQ0M+S>e2u^oBx&Jm{B>@nr-;Cn|7N^-{MIeY5;6Ly2J+{))%Q*i4?;B#pw#qt862{%RQte8+yRosiZq=!>ZQGT# zfq}Pgle@c5H04od--Z@M?@Z52KAddi%EHFlwa>0Qw{98!8Utnf;r{^G*ULl$qFUHe_GEdKz2*=^&4{%^TYQQq#z?Ee5#Ia!2rt57=@cOG5-J(Ki}U2MgT0b&z4zd!@t`u?frADuORjyY%+P|(d%@+`u#Ke z+kM~C)w_1>e#0dri0#6?f(g+f{!RY?Ewr?>w4ZY22hfi(W%Ud7KEgZ&`jGzsQa@0h z7xKLV@QFUs@r%+gEQW4KVn8${SeIVqw^@HF?W8%>&#A`bPh`8R>#xZvKZWa(7#Cj8 zEoxpd599}=wTU(S7nvzcl2 zAk>i@yz)otQxfnC_};l{UGNOZUGNwZzfd1gK0x^?^#%1l!b5WYF?g1nBQqd;jPf%A zSFDx6V6sd)F0LSA~BuFU(RQTa;y(H76iP@N?>UG zNkz#oR_!n6cKHtKSJGp0{&U#KKd3B8jmQtEOi3><=K+ZasObcDWtbZRQ|ey@mdp9w za(Zuk24^JBwq5vu*2%oCbNBu_h0{>L=kJ4d7rXFj*$cZ-S6N$D4J|V}pmzD2~mtn~DEiACKKjN$en=cQJ&J$^K z;E;I`<&ggXk1V7yvhT$Ut(cb$nBvIG@&N@!LFvId(=Tl(Bft(*pzJ~69d&!n}1nIpnr?NkA%r7OK z#^fY7i1{L5&tq{F+k)%akXTc$6=9JRR!8o7Fz2uuHnHUPHvThbK<4bK6GT*3Sg!E# z5Aj<_H$b3$-5a<6030doKisR-;VA8y3->4E0wDGasP+WwhZGQJQ_kX@M|U85XyH8X zi!e`a@$n!Jdj9~|EoOh|_3z);-{e6#u6lL<0KWeKBE65FpJM+106)L?`uM*5?|190 zoiRW3&c9v5)7$F4^V8pdd+O*PZ|>V_UG1>_us-eX-aIjRSk{{RX90H6DP@Pha8{S58%{{Ym$9d-WzgY)nI0L6{z zeS6;bb?D#xvLmmq`}BHq>pve}{K2X8cmDtYKl14_#((Pn0R7>$hTnW62rKVf;?1Lg zVHg_=3l)^C$&$Ge!nakzw^Lf3q?+^PQQ4#SGKQDKS&&JOzdwIYA!~iG|0RRF50R#d9 z0s{d7000000RRypF+mVfVR37&j{YOztGvS#)NeKHFe+!3;EL*k5B&*7 z%FAxe#cRJ_W*Yv#bMa`*wP%S|`74c=&vh~nT*AR?j+c5|J#XKs4w> z1}^Oi%As%L{{Ulo&98D`$yzRVQBeX)5oiJz6^EWCP@&HoK@1x286MUmH_RHt7&jVJ zt?jPWKnDa?#Lm6~2w<_~imvUI$CS2Rm)6W}WyNp{Dt7V|=A!Hc^KN6g)sDXODV1gv`+p*{3S7oQO|AmKC~;7`C5DQLYg0NlB%O;eIX|kX3sq|>{{Zmb z2l|{#Kh!_0za712SJryZPMuF#>pf*E6;Xl)dlL>Wk^=n}n)^*F^%*oKq^$Wl>l?sG zWGz1f;DU!zlxO)&V!jdl~o$n8$ZXk1`+|l*J4C=kz`OBeX!4-)qRZTeCF9 z7k8m5bOEL0s|58b;Mi5P?d#kQa#Kh*F%k_esA3epZCsQj)W9+qa^&bb2H`;*e0@@Y z_Y`0=4cfGXJ_pMH0v#+8FJZ6S8r?+g(x%3XIU|hhZ=%yH<%Oi~WPbX7-kFP1B4+X# zHv|SwZ`BJCpbX$Yjve1heILhJat}X`&=;S<<40+K;M`n^4$~n=1~ZG|h<8sl6v~L? 
z1P^8U2-+Q}1o;3LG`r;2v#E>uzvX!9rmsQ(FsuPpfNnZ+2lCAEY+N4jz&dAE=43Cb zBoqjYRZ( zRIex*iedTwC-go2BkS+eI(d!3bxstXmBf4C$gmu4u3LvUs&tB?tO$_+L|uv{DloL* z?<$kU6zRYjgAAYzrHwMkn-d-ms|$XBh5NCzEZ_jE;#(oOiC@Fztjut$3cCD<%vAyi zwL=d0USqlK4p?gx4#H6-ka?A&>^&txRZ6b!M0O4@O6j3MVge=;t~*S2{B;YfwHtkU zRHsddPyT7gP(*aY`ndAX;X%l{Ix~?#VeBWw05)gt{{VeLsSf^W`uccrRfC%%?CKD) zykAkTr zQ#(hw(mlrxye}4-K~2K>JaN#9>xh$9ndXs?r8yBXwog$=zVtW4oZa zI2DjWl`Cx9$K{`niJc-niS&cvl#{ueR{sEgXY@V&BZc>AB|?du;igrc)Arhp45^tw zLk`t|A|Q#(6-8NLoCBjki>Vk2f*TKd0X@j?#WwQYq)$R&YzNG>Xc=&chX^2AK{E!a zwH>wp07CQ=xO75?04jGmx(JasOSZ4WGc_#U=iB<8z!6xIxI}_lmjGs4+y`fl6<)Qz zjEAB{#fq_)nQswLQ3QD({#{>}(qp;nq0;?6U*q4buWpeIm4`$SG4&m{MJU0d5@V897wcxi}Z%A%f@Wv@oO z(Q`r)nP(XkCFGx=1&dw7x!cDhaVc)d>oZ4a!^9d8s!Gx#CkX{%uYms8hJw{(3o>DH zP4a97(9jJcJd8E8@6>;7+FexIf0#%~-G;l&odAGn4k9&R%*w+@SG~?>lBvKVeKEKM zkfFdH)Nw`-{@&n~W*8f{V3%=bf@l!H@QBNa2Bw3UEQ*Cm#S!k(W0Ef!R-%C_I{`tB zsF3hy76+^Of_^*vOhKK!sw?~4XURc2`hc@JAI>o22!CI2SR&kX^qYZoP85%EyH@R< z(8{aEV&nL*61J-o(qS1}gF_+?Q9#ghVvY(Z!9e?{{Y6(a@Q6Pet+>}8s8t6q;Y@cd3&6n zZr3_{MQBALHAs=fCVC4*Q5Ib409^o4Fq212ilNA&awy@L^9B##TNT|;b$~#KFo@BO z0x6D|6ysHM6pssvDy6Ie<1EvLf&@bL_jU<{m-{xLWz`(2tO%7DVP*#3D5|ufWU4G> zd^1Sv$WU?>c&3uW<{`Wg>b;A#vel~0abnG){F`{O&gEJ+{-_p(h?j(%r8@ApUuIa` z$T5erFARBGn&Fe%A4!xWREdb=PUu#<{P=&a;m-3g?Q`K5z+^bNlmT=BiR1Rnn+634 zV^?zuirGQiEMm5?FxLxZ#i$mJGRs`b6}8>Ruv%HF4}KKg7fEPK!Z1=J8>G+}3J}Jp zJR=fc&{7Ot6;CkHq9-!Jg0(FSVq7$wbg|rd?V!zgH;&~9zMK!N^3QR&AZuJ>~n6{%m>- zD#Hm&C?<(bDeVlPjZ!4N9fIQ=ig}C-W^z_F7FU|3N+SIvAZIkEfavCFU&Q&W)AM6y zVAlcBc^T&zxNccwhyMUN>DQ~-v;P1@&SP3cxTT}P#^5k z5c@zbf@ZG7AMBPg*dPNaqQdP2vL%@jICT|@;i>ebvb>Hi5wV{QJ4QiZh@vrtr2GN! z=okh#rUg9?!1BXQQ3tk(Bzpe<_hS+sVeTPL2saFcIaxMU^T964j-aibIYDadS*#0) z*&kkb(uqjq(lZ?p?st--fP0a~JKj3Enk&Y|mclA+R$m3hRCGorkcUF%npeBwth?05 zL>Su>1R;!3W*ZPzrsgYF7XyH}78jA;REbvSbu2+@1=@tPa(L{(vGgG}82ha*QEEzK zPN32+0~|(3ro&&-th0iAE0g1uH}O;%wt?Mo&A-y~hSjxMcq9tg(cK{y!Y{^eha1GS zTyRDpps{(pXK8v}HDgg~FhbK4shKOc4DyNq?CJ?Nu|5m&x(-4;hKN?xoz8nr1P61o^cgpe5v$(=UnHCrXE!rBW6tj4<;+ zckN*zM9Q-wlrsttp4J!od6yXQAgaozW!+X?LX$>0Z37~~Ggo&781#*Ci7D++WZDKd zX)2Q&E3|qd&=LIx&=2u;dX`ack3>|48s8h~W{v#i%(d{)G{ASJrm%d?cWB5+g ze<&3m7xVqVo?lDH@wh#;pwUm#L3#>e;gd?^hR_M=VWHRn01-y^Xaa*F+kndG!Y#sh zGhueT2~tz7tLT5!Vy>POWJbLx7E;>>>)s|5qzln}RRMqi0EA@PkcBV>)v%nBleXz( zbG}GXxkau z06d|DgGL}-6_cRT7A+nwC zA_1^VOCCmk?Z#u#C)Js8V$iN`s9;(MFZdyD=66jQ)3wV7Ou>VZKcskfL(2aE1InP@ zX`;TyQokM}!@R;v=?TOnq$l-G-w`MB1MvkpK+_g?ERN$C>`_ADeBEC#9~jX}Q>Fkt znU`@x#Whjbs5BucBZSNL9wuiBpc&h0qwGv?>iV`4D(KcMWvA!03(d5-d{HD#Gj z)lj|P2N@iYG)eWOAyGThsCbKneRoOlm*NX#C7`g2?W@~vUvXT}GUERL;_-;>m(y4A zA*&VG2bJHa`He8I+W!EIlpd75HhuoG$rDR14szgmshlHot1Rrg5w=5v7CQT?`XvBv zzL8D;038SZmUtRnIJS=$3Zminv94PrP$*mo{m@7{C9O^p#|tw_K@nU;kU&C153x32 zV)Iq!!O;i^2(9GQ&8-%hnk4z7{{V;F001Z@O+w!AS@Dm=Wnj+=ll^Y`RatjhsIV`1 z@Wzv{7ifgH*^Y^*X!@N_$hxB1Cm1$8z=Dk01Vz}-Oz!TJ32pJt?_C?rOTmvonmQP- z;`UZE@gd6o3CH?>liBplvaepkZKP44@(g-_M+J3II}!2eje^ygTX#9f`y-KrN{v*H>cOsuu#r|?2tg)gCDG&g7hK0=R?j-E{N zLaD_=#iz>*N|Exn;Q4}>mnDl`{6_x(M?*pB5@5$g;<flkE%hl=dqoIGt zOohD6NWsKBdIX7Cu;Ix>az}9>F%24s%IfwiOdQ2XS_YBAGc)qC0}05tAg$G~0i9%k za0y~nVwsMb3M9+!Nmm&!3AWuIkd?>kIb$h=-N^7f6LQK97zRmUEv$5Dh5!n#0EnFAq!o|_o1pU+4cdyM)h}rQJBV10k`eiXM<})8 z9hWLy)=e%)fD;(DYFZ-NA64nu7gSq_+Jn^yZuYf|IfCgiK4&x2TK7C(d2z?T&;0!T zU(z~_FH1Oj;-vooKm51P_BwX?LhDi4Z}jh99@WMVqZ7)5k_rPPr5tK0vn;($U#>Fk z45$l5kl_M)l{&7xq@)_O0|B}Ze#MmNK1hIakoX;?kzoK@;fqL?o`Na@Kp+5=0!SdG zw^6bBsE-#gLohEYQCg(jfrOB_V@p#iRaa{R)5y@E7_zHEBZeG!)Jhb|HNf}fR^EFu zP{Oc?g^6qlBz^*6VN)_LsJ9c-S+S>5)uGwwsICoDJ5I1=YOKv3Jv#>aSvMQL34dLZ z%feYiG5#dW-ZO)Sn09XqREq3!VnxtFkX=!fBcpU%yL+PoOXuuYl1%a&Ej%qi|ZQieX;3C8i!5#o|d 
zLoJfP9nCe&PHni>fkSXJPO*Zx^N&6ji@Gw z#klqa6x~u%+8};UWYgs0>I>GJBf4$zbGQqEnEZ!QgNOhC^gD$9?n_U3Vkil8gD+-E z#vJNAE3&SdMU3|JElgMi+Js7Gmdfe@qLgu{5K&YdwzSYLFEM)pD1!iM2(TqJFKZn^ zTroZic$O!IKp3W_mIWX^U3#g3fTbL3qPzx+h!(d-X|u#|;_Cc5X|@)1n};nYjM3~1 z%3asT5l)wbrf2*4JD;?#cTU3ZW5Ve$yECeW;U_1R9#UR{d7ei{n7{k#Ep>hv5EI}f zjR5SBw~O?`qCQNR%CmbD*eJQC%IUm=#N=GV0H!f z0c5@+xybNH)Rte}+BI~(pp~DwR!<~4h47Yky>#RECifYf75gF9hawNF=*7CF0DtX2un;T|MX)Vn6`^mD?Ta(TT@KGEY(9DApEEpkSCpx0^9 z5tAOqSIqH4?%~PWlIeAX><=85}B*rzBBhG_kbfG=6uOTPeRZntmRM%P2T*V1vhvWP!XHovbyiZ66wpd2Uyw z;-xPBABLY@Akt3LwU_=JcGE&q8jno8dSxPCw`v!1FJO^$r(R$(6C3mLbG_^Q_UDB? zlf~$~dD?n+Zk@V*?dhy_4O;DJh2D23^SS_(;|o;gYHeJdf_L6M@s@k?!+!X)hBVQl zj_mF5OpGU=-j9LNI~uxSFWAwx7CCuxq)v7D+J+e76SO3}_3QT)AD~VL$LbIj4uqWo zz%SdJt0DKMThzz@0L#Ty%897`18LL3XbG{O8}1*f2m4sgxxV$BE}Ph>f$MD7Io&O# zKFVwE7@z!dyFQY?@I%|sc&D2@-jSR;_ZG>E7eOtI6NbT?CV{n;KQKa~cIN46&nP8( zE`#MK5s%_szU;iMex0gSnR#;DMeX*6);{yR=)RlIT)nCIEy@u5Vtah!t7a3J(dJi7 z0;_ycA6#CoOxrqly@@REtOmJ)vIdeF(d+$vtTI<)oB7Y^Vs!riE6ek+L|`qB?=y_h z(HZFT`w9O5g^zpD`4j$R@#Dpg(H>vaT-Q6r(`N;@FbW+%s29BD#ku&8JhKq-=6DmD zbP8`6l`dVPA0dt!7fWRf9O477JBsvBm-Eg}dRe`v66hOtUCe!A(=8BnrE=w36h~&e z<4>Dg*w6eHE~h zp4bMx8vOM;>*7CBe|bNko;>kqi#XJ{;@$^_H5W_wqPd3JVnVy7UDNd86K&q73=OhM zwYyw(AD? z{{R<1)oieiEMOwt78`U@QVjZ;pVFWB?7s2%#{LDLr&DY6rL;f75B~8#&oMtupggvQ zUw4v1C&R(1i!4YMx+U?I%V1VPV)6(y`m?30RW~Q~CZp00hZkq! zyQZ`FWavKO7uz-|=VN0Nc)1+d&A3Cd%>Mu|&YJhjM~r^|07S!IopbIXeJ63li@U+z zMs`Fq6I2PXd9M$Si*y_Tuf3R``Jdu4m2eq9W{k+vC|Y<6}pAJ5Ox1JlBT zqh8EMrojoI8(6&~fOQEqdxGbJ{Fwbo^^-%GD84-J_8JvSpDw|t5N zEamU1Lh_x{}cXPNW$ccihry`6+OxM;;U%V&9v}5=^FGV2+G#Kqq7owhV&`Q>3 z#Lv^9`t)P9={%%=&ot}?S(jPP?+8Fc^eWrrV!u2e4ujX649TiD+9=|=NDCu%A z3so|fPh@R={*A!kVk7L`0sjELfu?@@S%8yyMW8}xC;f;GeUtsP&&d-JY4&s<{>f$& zx*Lcng`~jak0rvN5@GPZY1=uylkjhx&TxWXo9TFGYpIM6Dx$bB`}%w*hAU0#KT@Sk zw@73R#-!#S1_}5Y5_8P+D-;O+u<6j*XVC!`{Wl8|V^>NIj4g>+rXJSfbkppP2_GFU zRY6eQTztwmJqWV>GUwLBdPw^tO;X<@bjfw)Wkjb~>BKNU&!;u3$L~2tYFvKo?i61sI7&B*!j^ZJ>=zTzOm zvu@V68v);HfS=yZLOXN7xX`+{jm63AxHM(PqkJG+-GxG>EpnI$meKgz4^os4jR3Ze zd^EXGzJ1L_FFrtGu5*ag#(2tx`C&@*p94_W`)cWS<0e&#=~~RyPTxH@>Fhk`rmLBq zZowL6d0B}+c8u|m%G@&!b1YEB77^~;tPtEYE`c=E*tiV=T|ZgtE)F-095SanlTGJ* zo$hzJe%${6lRLEk0GB`WvuMr98xac6e4aQnVUD6-1L`^f@dwndtk}wf3X>{vorO(^jnl4w;2vd|9~6NpOEexH|*371iyBvGrdBIJ+DM#PA$amyKNYm+d{% z#7cCsjF-(xC|w$Ple|wRf5V;PcPMUs6U~_O&n{~4u!>_N*Z2Bwb^idXXYjzM?R?Pm z2=CRi^mjvcf^nTQUkJZ3Ptu%Ao35P0#WK9WZR;v3^F!)0F7(aYOsyd4Er%VlqnoTf zBHo!CduT)nc_*)2_wik0YZ4s#cecyBE@@q*kXp$OiDjii?PfpjNqn?F`OGXE%cHer z&Boj11yLK;dQP3EYIz{X*hJA|XBXNL>$lm7rMIaiK3Cn8S4^*mh)=^wde zfW`Q;p3kXXnXrD0*hjW0$$PVQyRwP>AWcH}C-@Oe9Bd1FdlHq1gq4L9$SuUt~d6q1TKl+I7Am@v9fxw=G?_wB6AmWnJfaZR1 zrYt={Jai5h1bXyVFlUE~Z;4a1n=edob=Bg1KyjFOm>lONN3}C?g+Cp6d-S(Y7_^VW`2J&bzd{z+R%wE@5X=+C?@_<2R;T*WxzcS+g-VBkF(QAe}uuDZeFvs;o>x z4V-t}oV`7}PZ>K{^ott|RkqV$cDRRkGrm8Ex*hvBX;k|{Jm6R=d{35=P|8$%tfnll z79VcE%4NUYwaqC=o_bxqLhkfmyu7ZDUX%O8A)})YZ0G(z=FS-~#)`FtCB1IG<&42# z3==*&dc~>k%QtC8tE0;tONZgpDp3#%ewMw{yiBU0+33X?HR&!}8{E-M2Z(){j;|fM zd(HS6Zw4+mHtR8Wxakw0qy4L)d%Vm~AKT&KnQD5Pd+E2oUkE_W7!5P$pzfXQ?Np_> zLqwyo3ECe=Ufn)^p7B3FPVb(X$zB#op^$DAc}gs$F- zH63K{CGyH;pGsy)$SxU))lRZ24stnNN=KvROo^Iq(y6xx7e!{L0dbeq)?&M*27-9v z2X9)*c9n#@F;gnU9d=}51m)@1gg|7bUU!YIqxYD{{J#S5PX7R><&DOAdBB}EX2TWp z%sydDe?O)LTy~h}8u1RGV@>HRXUo%#1%SO@}tngV?LL^mSL9O zM~e13F68n{^DDEpKxXvmB^Vh zQw2P;pwp%i#PEJPcNc}x)5S8~<`IF@@Qv>+-g;{qwJCF1D`FSID6$fh1L6R$8QSzG zW2KqQAVbh*DzV$AsAXOh8ar1&; zy+YX7`$0&{aF-`AdG8|YxxkrjU{E=2$5R1C2QFn8k-q+`Bv)(&Usko9?&lThlyo>M z_fg!YXw>jbgeoCLgf&?1ismK^Ou&s7W0}dthq6p{iF%)2j&hwP+pi{Wzj7j@iAs%4 z=^REiuMf>LpKYZ;tq%>c%FDTFR8C=2Z4$-fFtTB 
z{K`Q80L4KN;RT{GbW3y8d@$Zb0yo7p&dD4PfaT8CCsCM1a_K#EZ9F;?A()-t=6CZ7 zq~@>~y*HkPoF1i|W1($Fu0@Osk7d&4{keuX>KVbx6;)z(lKLb%3Z4jf12NE@99joY zwvzJ6BgGrMUF5aJmyQNerHaB_U?{dQ0_Mvfh*POA{0QnAE>4d;w+1G)-+pD`F9jK- zI<^r<2}8uFPQh+oztK4ib)GJf0{Ud7dfYeMbKPyEZvTXuTHay_^Yn(U#+Sxr2C%{{VysRGhzG zMyd@ipuQtQ(-V7rUX9ke(<^Qz{VX?~XC0tw^h)^pnu4Q>p_?P|FvIj#I^pdI?3$Fe z68!Wbs7@qLCtse)fJ~|$ndgsLSfhnoB0$6YXVzHgYPnM>nHIAHLU>k#8_mB6SAN6ewu?g3h00O+%m zru4Kd54pzfW3q$WD9+ENzK|@l@QM)e2GCKRbu4waR;*vl9K^zK!nA`_L}v zF!8Czn#k9V+G=8962N6>ZdC`$U)?urJ>@c}6Fr-!6U>NbxrgM}_?91k8vOctg@#5BTKUuIi`FkDNuKFJLC{3g5nU}f`+0a@yd zT|N;108Hl<{ATREV{snL60~~E?2pAED! z%ql>3Tp_4F7KXK)2vu^$B=PIw$6TfX_X?a3)Wa~~k5+m^Brd4435~k|zHapp)xM@) zz9*UW&`sim9Ur7!e>%P7jF z!<7DexX3Py%0PH(10ibG1_*hVu)1OexP{$PtwpWSK?o>^$8*JeG|8yq3nH6 z3+fX=^)LSb6cewae{f2B6T46)XZ*>2{<3C%fi&Mq8`AZO+k7RC&yVV4eu96mNySfM<~rx1 zIb`)8gjE#$Pxeakz5f98mMi`K=}Px2_a8fqu^m2xGQDp5%eTx}RK4`#aK69f@W7EF z%5LqC{*a1Lw!P+@Ah9I>eV-??{dK3!Gb zXRPwgD+CvD6)KI|7jB>dL5O2rZST`)(^1az)0(r>Z4vz5YQ20AGya?cuSTkZt8Sc$u}oIJa= zmyeWg>@J35X65_F?)#Q)Pj}pRRU3*belJsE94+6}LwC)YDdd#$LUzw&sQagpD&;$- zS-yjJXSBXkrRfe_xl)OFUI-2(u+=3z4~e;RhR!NV_n!o+o(Wx@I)pke-HGUg#-gzY zIK{%V#YEMl#rCnj)AK9cp6|H*((cw>%D_rptJWWsygw6u?@Peu<$ca^`;OiH^u88( zVeg;TBeuUJ&vYTDQN^2_zzFyL(C8m=XS;is?nUeT&-zpTl*|6;Ed*!`{{W{y>p*BB z8d<{Sy0GFt=WKk>`|WtFsjC^PVDrd{{X7ZvnJs(*Sx=U z@6-ATto|Sxq|686RBbNegysSF#1)aDNU4XqcTGUf(=_Mqf7YA8{LdTic;9ohbd+d) z^sgf;+wBvISGVqPKe>evo32tJr?2|;{xTc&dF=3-`JLnUJkPlEpL0ELxpp7Ua~Ipv z8W8fn=X?9;)@~bj?VsGw?PIpS<1#1A%dfe>pEH)G^0|gC99T9I{n>AMZkqXtTDi;9 z-fH3BGgcphSBvZ?{K9|1AaoxD@&2cc^*n#Bk>PzzK{_;;>%<9z6Ij1+cKp#8hmuUl zb}JE@LR(PfTecwbKoaDs!SqM%MgZ7y#g#%+G2Kk-uJkEEf=SC~`3_kr5IHv*O-K~&1_ z#pu{IV(k`h5G5>fv`eQmVlhy!!2}{PtdY9!Qv628%Rf}651At3V$$3!aNVDA(nt~UUg9?2EdyTGjx z3ukW4!KI3&yv$8w-?RB8y4*0rYiOJFyB>`#~g z%0wd4D0v9c5mf;GDc7byNZt1mHGi>uK+{UQMalcG^G~`xD2}UB;&o_~|IxNLK z2$^wq{X|~S`3A%QTazfvBAK%{-of;Jqa*lgClp5uM-3V=i6$E`VT_)kx>$f~48084a2&%QL0q;w{(k#6on@kfz zrtB^QXx))YG04b>vr2=r7+5h096++YD3L<~?6NTKx8E_50mCk3vNrI#avk=>L5?ZR3cDsnbFeqd!WR*hkRJ9@k zq!=a4HpbposGte5D1EP4kYOpvTjpPeQdhT5@6vp+_x|VfU9$qK@wvkaGTvC5Kt#@5 zR`r|A8OE}?0e&0&K^TkIwVwt%AmA~6_4Cs_IfgW;x1oxN8gl@`ss&K1T(O+}7J~E= zl!=&0Jy5M<77IoTqY~Z%;d{Euzflop&uCv|K%8W1m>*t%Kq~?QT55@V)3MUJsq|es z%FPU+YhrW~E3aYr#TuqP=E~(jT!o{=D}CGW>FGb903-xk3j}MD9Z@m1<=9- ze_b6vBc^2;hVVr&R0ZA*M|ZR`WWCLAK@jX`vhTsNRkkojDQARAPrMO5ngt_FeultO zC`4!uM8sw>VZ+FPR9;(FGFkvTm+WKUiJ?Gnzr>*U0C)vIh9r8z|6Y{g2ID5mBOo+@tMEi%Q<=v#|2Xr>j~c$bp}Iw zzg;Xg*28wD4<<*DhXELAKssIt_~=xcAO%(>@WMV(+TcYXp^({#4|%YyJH%GK;mDHd zQ`1IFFnB1Fu5P@-`2&tlchEQ=G2jh87FN0I z1%rS9#0;CEDA-YIQDP$uCb!ZO>I;Iy=l7IVOuZrf62-vCG#w+d31}N^^ZsY@x+A-l zR!I4MIH{qGK#~<_;m6`XDhJ2=A9CF4YSt0(JuFtgt)&~4^Rhpfz^YTRU!Ss-O}$-} zVc@yEwX zp^$g?M)6m4M)AVU1$E#-LE12<%qLL7fMSE@8AUiy4H`_XbD+cKNnXd)-q>SZbpFo- z{)4tRWn_*#j4>;F>`|62BC&jwfjyD;1b+-1L!(06XHxxmZI$R=$DouLvcR=A_>NG7 z-^{Fkq+S7$r9CVtB&Z|=0JK{`TE)6gxe)Fm^Oohkla|5bphg_GykiS&dGLDN4itV@ zO+dvR!1?J#xj8l*YIg>{o;u!L#Pk)bg&)V~e0#8iX=`U)JDNp=1MhXGpRS5V&pHc%v>6>S1!%xnY? 
z2!Ysx#4231^(>0(;8X2>CA=rSaKlo-rJV^O-V))`!K=E&G&ta^NcmUheq>yzQXYs< zyfJE5b_gC38H5ZztQA&M`LbfZSz;bUu-Zni1FIc{qXW(-nP-M{sctUFdp_#`0%g!E zaIN2qRb^-7^o@vuuOCR(X1AJvE!H8)$~afdzC84qbptIxxGW-A;$qBNSOLIT8~{da z36RK$sTK&=Bu=9S8fd&chlWw0IbQF80ssI21RkW5<0-JIWL+7Fc28d21D&DHpgw48 znSO)gpW6>amGoF;l}G?Z*oG zeT~rehKc^h0B=F=)m#^PYY`|{w^{2z-B7{IKoCT+Iq7JbGs=IEetHl34Il|c9jx41 zw@juOL7E4G6*2&pM&KMrrPUG65{)%Lz~T6Ka|c@>k}O2RCon4tp4^1OYRnY3f@4rD zl(pQ~^5U*jZ0zKtLK}`;PIK<`Leeu3CINC0C?v5jhAuq#BgYts2CkEEd}1xmqh(c1 z+goR{Tn@F^b6x;8!$_svJ1BwRD22YZx&^;wOPC63*O;EF4Du zy@_ba*xB9Q3T`Wk-w7Vps0^wEuBG)nnw#q~Vdrh(kowUYUe{6~+kC$WcaHbQD3*hnN6>i{&8=7i&GD zbkJe}XeG6p2uKyn5WeE+nt@@Y16|pK6gXIo5P66T#H~1-Hx0%{ZJhC?qhZz!qr@?R z-|`4Slm~(_P*isSX=qV~*8V0u!PM_1-ioZ)lSK}IAW$~3dfX#QAx$+a?7)eAn88E< z3`X@Q$V%OQ;>zX4Z$Pni3bkSK&iusy!pAE;U7nke!0T zByG%OJ&Z$;H(g>1qN3`@Q8OZ|7EcHWBcK6#B?>B%r`KQqWlpQPy(4td3s}&5UDifkr>ypxJ&>{+QsW} z!Si20LIR*Kf)I;)yv*BfhC|YzG0X%*F5;_z>a&qxqf-{N7;6Lz4|PKP9tgn9z;Y@Z z-`Y22Hx&{l4Pv64G>09$iWw{#TCtQAru{|v=vHkI9oRQ2-qBDJkwxFqC_CM{G|>t; z11?lATNr^2z@QD}#UV|E7Eomv0)PMt3fbuHX%jxyj|{VM8s=!A`M@o;wNmG09- zC}1|_9jS#>P)5Iy>kV?X;;L?rPPFW5=*cF4H-+2hQ9*NJ7;dx#(r~yg0q9wo21bgO zWpWJK!ATS;YJ)(=P`wtD*d1;_1&Hcj2|)|uY!)F6cx5t_1c(GFm^e~qrs73R<46gF zb);k9UZR_SiK6}tuv;K5Gp@OA$j^1dXf>ju(>7JaX*WCXAq0#~9qx2P8iP|3W+d&=d8)?dsJX=u+l?=}`y-NPL$iSdLiS^rr!X%USPfPmf~mVJMidU}0F}Dw zWh0;uxD0`s1<)BA=$zG+Q|$Ok+ZX9UMm05_SLV2y{{XUosdg8O&-j4WZEe5x4b;#2YbEW&1Bl`J?*8c!r;ef=`vH657`5)XX^>0V} zi1t07xo-aellL#l`2EZBem`>jpO4(XC*$`oU=Od{ulgUj*8c!A_b$mRA5_I}B4 z{2#gEeLr%`KU4QSPp9q&WAy#c2kHBryXpI%te?65qCaw;^?W1#@V~kL01Nw +- Run a Pod that we can connect to and run shell commands: + ```bash + kubectl run shpod --image=jpetazzo/shpod --restart=Never -- -c "sleep 2400" + ``` +] + +-- + +This Pod will live for 2400 seconds (4 hours) before exiting. Which means we can re-use it throughout the workshop. + +--- + +## Testing our service + +- *Now* we can send a few HTTP requests to our Pods + +.exercise[ + - Send a few requests: ```bash - curl http://$IP:8888/ + kubectl exec shpod -- curl -s http://$IP:8888/ ``` - Too much output? Filter it with `jq`: ```bash - curl -s http://$IP:8888/ | jq .HOSTNAME + kubectl exec shpod -- curl -s http://$IP:8888/ | jq -r .HOSTNAME + ``` + +- Loop it 5 times: + ```bash + for i in {1..5}; do + kubectl exec shpod -- curl -s http://$IP:8888/ | jq -r .HOSTNAME; + done ``` ] -- -Try it a few times! Our requests are load balanced across multiple pods. +Our requests are load balanced across multiple pods. --- + class: extra-details ## `ExternalName` @@ -407,7 +435,7 @@ class: extra-details - This is the internal DNS server that can resolve service names -- The default domain name for the service we created is `default.svc.cluster.local` +- The default domain name for the service we created is `default.svc.cluster.local` (unless you deployed to a namespace other than default) .exercise[ @@ -418,11 +446,34 @@ class: extra-details - Resolve the cluster IP for the `httpenv` service: ```bash - host httpenv.default.svc.cluster.local $IP + kubectl exec shpod -- nslookup httpenv $IP ``` ] +--- + +## Accessing services via DNS + + +* When accessing `httpenv` from another Pod you can use DNS: `httpenv`, `httpenv.` or `httpenv..svc.cluster.local`. 
+ +.exercise[ +- curl the service from its name: + ```bash + kubectl exec shpod -- curl -s http://httpenv:8888/ | jq -r .HOSTNAME + ``` + +- curl the service from its fqdn: + ```bash + NS=$(kubectl get svc httpenv -o go-template --template '{{ .metadata.namespace }}') + + kubectl exec shpod -- curl -s http://httpenv.$NS.svc.cluster.local:8888/ | \ + jq -r .HOSTNAME + ``` +] + + --- class: extra-details diff --git a/slides/k8s/kubectlget.md b/slides/k8s/kubectlget.md index 7cfbb119a..fe1496ba5 100644 --- a/slides/k8s/kubectlget.md +++ b/slides/k8s/kubectlget.md @@ -214,10 +214,14 @@ class: extra-details .exercise[ -- Look at the information available for `node1` with one of the following commands: +- Look at the information available for all nodes with one of the following commands: ```bash - kubectl describe node/node1 - kubectl describe node node1 + kubectl describe nodes + ``` + +- Look at just the first node using a node name from the previous `kubectl get nodes` command: + ``` + kubectl describe node ``` ] @@ -358,6 +362,8 @@ class: extra-details ## What about `kube-public`? +> _Not all clusters have a `kube-public`, you can skip these steps if your cluster does not have this namespace._ + .exercise[ - List the pods in the `kube-public` namespace: @@ -377,6 +383,8 @@ class: extra-details ## Exploring `kube-public` +> _Not all clusters have a `kube-public`, you can skip these steps if your cluster does not have this namespace._ + - The only interesting object in `kube-public` is a ConfigMap named `cluster-info` .exercise[ @@ -403,6 +411,8 @@ class: extra-details ## Accessing `cluster-info` +> _Not all clusters have a `kube-public`, you can skip these steps if your cluster does not have this namespace._ + - Earlier, when trying to access the API server, we got a `Forbidden` message - But `cluster-info` is readable by everyone (even without authentication) @@ -426,6 +436,8 @@ class: extra-details ## Retrieving `kubeconfig` +> _Not all clusters have a `kube-public`, you can skip these steps if your cluster does not have this namespace._ + - We can easily extract the `kubeconfig` file from this ConfigMap .exercise[ @@ -475,10 +487,10 @@ class: extra-details .exercise[ -- List the services on our cluster with one of these commands: +- List the services in our default namespace with one of these commands: ```bash - kubectl get services - kubectl get svc + kubectl -n default get services + kubectl -n default get svc ``` ] diff --git a/slides/k8s/labels-annotations.md b/slides/k8s/labels-annotations.md index 59ebd4545..4e35e676c 100644 --- a/slides/k8s/labels-annotations.md +++ b/slides/k8s/labels-annotations.md @@ -44,7 +44,7 @@ So, what do we get? - We see one label: ``` - Labels: app=clock + Labels: app=web ``` - This is added by `kubectl create deployment` @@ -71,7 +71,7 @@ So, what do we get? - Display its information: ```bash - kubectl describe pod clock-xxxxxxxxxx-yyyyy + kubectl describe pod web-xxxxxxxxxx-yyyyy ``` ] @@ -84,11 +84,11 @@ So, what do we get? - We see two labels: ``` - Labels: app=clock + Labels: app=web pod-template-hash=xxxxxxxxxx ``` -- `app=clock` comes from `kubectl create deployment` too +- `app=web` comes from `kubectl create deployment` too - `pod-template-hash` was assigned by the Replica Set @@ -109,9 +109,9 @@ So, what do we get? 
.exercise[ -- List all the pods with at least `app=clock`: +- List all the pods with at least `app=web`: ```bash - kubectl get pods --selector=app=clock + kubectl get pods --selector=app=web ``` - List all the pods with a label `app`, regardless of its value: @@ -129,14 +129,14 @@ So, what do we get? .exercise[ -- Set a label on the `clock` Deployment: +- Set a label on the `web` Deployment: ```bash - kubectl label deployment clock color=blue + kubectl label deployment web color=blue ``` - Check it out: ```bash - kubectl describe deployment clock + kubectl describe deployment web ``` ] @@ -155,7 +155,7 @@ class: extra-details We can also use negative selectors - Example: `--selector=app!=clock` + Example: `--selector=app!=web` - Selectors can be used with most `kubectl` commands @@ -195,3 +195,17 @@ class: extra-details - Maximum length isn't defined (dozens of kilobytes is fine, hundreds maybe not so much) + +--- + +## Cleanup web deployment + +- Time to clean up web and move on + +.exercise[ + + - delete the web deployment + ```bash + kubectl delete deployment web + ``` +] \ No newline at end of file diff --git a/slides/k8s/scalingdockercoins.md b/slides/k8s/scalingdockercoins.md index 36ce26740..a84883be6 100644 --- a/slides/k8s/scalingdockercoins.md +++ b/slides/k8s/scalingdockercoins.md @@ -158,28 +158,9 @@ class: extra-details ``` httping ip.ad.dr.ess ``` +-- -- We will use `httping` on the ClusterIP addresses of our services - ---- - -## Obtaining ClusterIP addresses - -- We can simply check the output of `kubectl get services` - -- Or do it programmatically, as in the example below - -.exercise[ - -- Retrieve the IP addresses: - ```bash - HASHER=$(kubectl get svc hasher -o go-template={{.spec.clusterIP}}) - RNG=$(kubectl get svc rng -o go-template={{.spec.clusterIP}}) - ``` - -] - -Now we can access the IP addresses of our services through `$HASHER` and `$RNG`. +We can use the `shpod` we started earlier to run `httping` on the ClusterIP addresses of our services. That way we don't need to expose them to the internet. --- @@ -189,8 +170,8 @@ Now we can access the IP addresses of our services through `$HASHER` and `$RNG`. - Check the response times for both services: ```bash - httping -c 3 $HASHER - httping -c 3 $RNG + kubectl exec -ti shpod -- httping -c 3 hasher + kubectl exec -ti shpod -- httping -c 3 rng ``` ] diff --git a/slides/kube-fullday-namespaced-pks.yml.no b/slides/kube-fullday-namespaced-pks.yml.no new file mode 100644 index 000000000..4dd0fb854 --- /dev/null +++ b/slides/kube-fullday-namespaced-pks.yml.no @@ -0,0 +1,89 @@ +title: | + Config Management Camp - Kubernetes Workshop + +#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" +#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" +chat: "In person!" 
+ +gitrepo: github.com/paulczar/container.training + +slides: https://k8s.camp/cfgcamp/ + +exclude: +- self-paced + +content: +- pks/title.md +- pks/logistics.md +- k8s/intro.md +- shared/about-slides.md +- shared/toc.md +- + - pks/prereqs.md + #- shared/webssh.md + - pks/connecting.md + # - k8s/versions-k8s.md + #- shared/sampleapp.md + #- shared/composescale.md + #- shared/hastyconclusions.md + #- shared/composedown.md +- + - k8s/kubectlrun.md + - k8s/logs-cli.md + - shared/declarative.md + - k8s/declarative.md + - k8s/deploymentslideshow.md + - k8s/kubenet.md + - pks/kubectlexpose.md + - k8s/shippingimages.md + #- k8s/buildshiprun-selfhosted.md + - k8s/buildshiprun-dockerhub.md + - pks/ourapponkube.md + #- k8s/kubectlproxy.md + #- k8s/localkubeconfig.md + #- k8s/accessinternal.md +- + - pks/setup-k8s.md + - pks/dashboard.md + - pks/octant.md + #- k8s/kubectlscale.md + - pks/scalingdockercoins.md + - shared/hastyconclusions.md + - k8s/daemonset.md + - k8s/rollout.md + #- k8s/healthchecks.md + #- k8s/healthchecks-more.md + #- k8s/record.md +- + #- k8s/namespaces.md + - pks/ingress.md + - pks/cleanup-dockercoins.md + #- k8s/kustomize.md + #- k8s/helm.md + #- k8s/create-chart.md + #- k8s/netpol.md + #- k8s/authn-authz.md + #- k8s/csr-api.md + #- k8s/openid-connect.md + #- k8s/podsecuritypolicy.md + - k8s/volumes.md + #- k8s/build-with-docker.md + #- k8s/build-with-kaniko.md + - k8s/configuration.md + #- k8s/logs-centralized.md + #- k8s/prometheus.md + #- k8s/statefulsets.md + #- k8s/local-persistent-volumes.md + #- k8s/portworx.md + #- k8s/extending-api.md + #- k8s/operators.md + #- k8s/operators-design.md + #- k8s/staticpods.md + #- k8s/owners-and-dependents.md + #- k8s/gitworkflows.md + - pks/helm.md + - pks/helm-wordpress.md +- + - k8s/whatsnext.md + - k8s/links.md + - shared/thankyou.md diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index 1b60995ec..b4ee2a557 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -1,72 +1,95 @@ title: | - Config Management Camp - Kubernetes Workshop + Deploying and Scaling Microservices + with Kubernetes #chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" #chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" chat: "In person!" 
-gitrepo: github.com/paulczar/container.training +gitrepo: github.com/jpetazzo/container.training -slides: https://k8s.camp/cfgcamp/ +slides: http://container.training/ + +#slidenumberprefix: "#SomeHashTag — " exclude: - self-paced -chapters: -- pks/title.md -- pks/logistics.md +content: +- shared/title.md +- logistics.md - k8s/intro.md - shared/about-slides.md +- shared/chat-room-im.md +#- shared/chat-room-zoom.md - shared/toc.md - - - pks/prereqs.md + - shared/prereqs.md + - namespaced/handson.md #- shared/webssh.md - - pks/connecting.md - # - k8s/versions-k8s.md - #- shared/sampleapp.md + - namespaced/connecting.md + #- k8s/versions-k8s.md + - namespaced/sampleapp.md #- shared/composescale.md #- shared/hastyconclusions.md - #- shared/composedown.md + - shared/composedown.md + - k8s/concepts-k8s.md + - k8s/kubectlget.md - - - k8s/kubectlrun.md + - k8s/kubectl-run.md + - k8s/batch-jobs.md + - k8s/labels-annotations.md + - k8s/kubectl-logs.md - k8s/logs-cli.md - shared/declarative.md - k8s/declarative.md - k8s/deploymentslideshow.md - k8s/kubenet.md - - pks/kubectlexpose.md + - k8s/kubectlexpose.md - k8s/shippingimages.md #- k8s/buildshiprun-selfhosted.md - k8s/buildshiprun-dockerhub.md - - pks/ourapponkube.md - #- k8s/kubectlproxy.md - #- k8s/localkubeconfig.md - #- k8s/accessinternal.md + - namespaced/ourapponkube.md + #- k8s/exercise-wordsmith.md - - - pks/setup-k8s.md - - pks/dashboard.md - - pks/octant.md + - k8s/yamldeploy.md + # - k8s/setup-k8s.md + #- k8s/dashboard.md #- k8s/kubectlscale.md - - pks/scalingdockercoins.md + - k8s/scalingdockercoins.md - shared/hastyconclusions.md - k8s/daemonset.md + #- k8s/dryrun.md + #- k8s/exercise-yaml.md + #- k8s/localkubeconfig.md + #- k8s/accessinternal.md + #- k8s/kubectlproxy.md - k8s/rollout.md #- k8s/healthchecks.md #- k8s/healthchecks-more.md #- k8s/record.md - - #- k8s/namespaces.md - - pks/ingress.md - - pks/cleanup-dockercoins.md + # TODO - Update namespaces section to explain, but not do excercises + # as user will not have permissions to create ns. + # - k8s/namespaces.md + # TODO - Update to show nginx or generic Ingress vs Traefik specific. 
+ # - k8s/ingress.md #- k8s/kustomize.md - #- k8s/helm.md + #- k8s/helm-intro.md + #- k8s/helm-chart-format.md + #- k8s/helm-create-basic-chart.md + #- k8s/helm-create-better-chart.md + #- k8s/helm-secrets.md + #- k8s/exercise-helm.md #- k8s/create-chart.md + #- k8s/create-more-charts.md #- k8s/netpol.md #- k8s/authn-authz.md #- k8s/csr-api.md #- k8s/openid-connect.md #- k8s/podsecuritypolicy.md - k8s/volumes.md + #- k8s/exercise-configmap.md #- k8s/build-with-docker.md #- k8s/build-with-kaniko.md - k8s/configuration.md @@ -81,9 +104,8 @@ chapters: #- k8s/staticpods.md #- k8s/owners-and-dependents.md #- k8s/gitworkflows.md - - pks/helm.md - - pks/helm-wordpress.md - - k8s/whatsnext.md + - k8s/lastwords.md - k8s/links.md - shared/thankyou.md diff --git a/slides/kube-fullday.yml b/slides/kube-fullday.yml index 4315f930a..eb27d1bc8 100644 --- a/slides/kube-fullday.yml +++ b/slides/kube-fullday.yml @@ -25,6 +25,7 @@ content: - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md #- k8s/versions-k8s.md diff --git a/slides/kube-halfday.yml b/slides/kube-halfday.yml index f498ea00a..3f2cc2f98 100644 --- a/slides/kube-halfday.yml +++ b/slides/kube-halfday.yml @@ -26,6 +26,7 @@ content: #- shared/chat-room-zoom.md - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md - k8s/versions-k8s.md diff --git a/slides/kube-twodays.yml b/slides/kube-twodays.yml index d4c461e75..d1ca8eab6 100644 --- a/slides/kube-twodays.yml +++ b/slides/kube-twodays.yml @@ -25,6 +25,7 @@ content: - shared/toc.md - - shared/prereqs.md + - shared/handson.md #- shared/webssh.md - shared/connecting.md #- k8s/versions-k8s.md diff --git a/slides/namespaced/connecting.md b/slides/namespaced/connecting.md new file mode 100644 index 000000000..a75555c78 --- /dev/null +++ b/slides/namespaced/connecting.md @@ -0,0 +1,106 @@ +class: in-person + +## Connecting to our lab environment + +.exercise[ + +- Log into the provided URL with your provided credentials. + +- Follow the instructions on the auth portal to set up a `kubeconfig` file. + +- Check that you can connect to the cluster with `kubectl cluster-info`: + +```bash +$ kubectl cluster-info +Kubernetes master is running at https://k8s.cluster1.xxxx:8443 +CoreDNS is running at https://k8s.cluster1.xxxx:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy +``` +] + +If anything goes wrong — ask for help! + +--- + +## Role Based Authorization Control + +You are restricted to a subset of Kubernetes resources in your own namespace. Just like in a real world enterprise cluster. + + +.exercise[ + +1\. Can you create pods? + +``` +$ kubectl auth can-i create pods +``` + +2\. Can you delete namespaces? + +``` +$ kubectl auth can-i delete namespaces +``` +] +-- + +1. You can create pods in your own namespace. +2. You cannot delete namespaces. +--- + +## Doing or re-doing the workshop on your own? 
+ +- Use something like + [Play-With-Docker](http://play-with-docker.com/) or + [Play-With-Kubernetes](https://training.play-with-kubernetes.com/) + + Zero setup effort; but environments are short-lived and + might have limited resources + +- Create your own cluster (local or cloud VMs) + + Small setup effort; small cost; flexible environments + +- Create a bunch of clusters for you and your friends + ([instructions](https://@@GITREPO@@/tree/master/prepare-vms)) + + Bigger setup effort; ideal for group training + +--- + +class: self-paced + +## Get your own Docker nodes + +- If you already have some Docker nodes: great! + +- If not: let's get some, thanks to Play-With-Docker + +.exercise[ + +- Go to http://www.play-with-docker.com/ + +- Log in + +- Create your first node + + + +] + +You will need a Docker ID to use Play-With-Docker. + +(Creating a Docker ID is free.) + +--- + +## Terminals + +Once in a while, the instructions will say: +
"Open a new terminal." + +There are multiple ways to do this: + +- create a new window or tab on your machine, and SSH into the VM; + +- use screen or tmux on the VM and open a new window from there. + +You are welcome to use the method that you feel the most comfortable with. diff --git a/slides/namespaced/handson.md b/slides/namespaced/handson.md new file mode 100644 index 000000000..89abef0f6 --- /dev/null +++ b/slides/namespaced/handson.md @@ -0,0 +1,136 @@ +## Hands-on sections + +- The whole workshop is hands-on + +- We are going to build, ship, and run containers! + +- You are invited to reproduce all the demos + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person, pic + +![You get a namespace](images/you-get-a-namespace.jpg) + +--- + +class: in-person + +## You get your own namespace + +- We have one big Kubernetes cluster + +- Each person gets a private namespace (not shared with anyone else) + +- They'll remain up for the duration of the workshop + +- You should have a little card with login+password+IP addresses + +- The namespace is the same as your login and should be set automatically when you log in. + + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - an SSH client + +--- + +class: in-person + +## SSH clients + +_If needed_ + +- On Linux, OS X, FreeBSD... you are probably all set + +- On Windows, get one of these: + + - [putty](http://www.putty.org/) + - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) + - [Git BASH](https://git-for-windows.github.io/) + - [MobaXterm](http://mobaxterm.mobatek.net/) + +- On Android, [JuiceSSH](https://juicessh.com/) + ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) + works pretty well + +- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets + +--- + +class: in-person, extra-details + +## What is this Mosh thing? + +*You don't have to use Mosh or even know about it to follow along. +
+We're just telling you about it because some of us think it's cool!* + +- Mosh is "the mobile shell" + +- It is essentially SSH over UDP, with roaming features + +- It retransmits packets quickly, so it works great even on lossy connections + + (Like hotel or conference WiFi) + +- It has intelligent local echo, so it works great even in high-latency connections + + (Like hotel or conference WiFi) + +- It supports transparent roaming when your client IP address changes + + (Like when you hop from hotel to conference WiFi) + +--- + +class: in-person, extra-details + +## Using Mosh + +- To install it: `(apt|yum|brew) install mosh` + +- It has been pre-installed on the VMs that we are using + +- To connect to a remote machine: `mosh user@host` + + (It is going to establish an SSH connection, then hand off to UDP) + +- It requires UDP ports to be open + + (By default, it uses a UDP port between 60000 and 61000) diff --git a/slides/namespaced/ourapponkube.md b/slides/namespaced/ourapponkube.md new file mode 100644 index 000000000..00d9d70a4 --- /dev/null +++ b/slides/namespaced/ourapponkube.md @@ -0,0 +1,162 @@ +# Running our application on Kubernetes + +- We can now deploy our code (as well as a redis instance) + +.exercise[ + +- Deploy `redis`: + ```bash + kubectl create deployment redis --image=redis + ``` + +- Deploy everything else: + ```bash + kubectl create deployment hasher --image=dockercoins/hasher:v0.1 + kubectl create deployment rng --image=dockercoins/rng:v0.1 + kubectl create deployment webui --image=dockercoins/webui:v0.1 + kubectl create deployment worker --image=dockercoins/worker:v0.1 + ``` + +] + +--- + +class: extra-details + +## Deploying other images + +- If we wanted to deploy images from another registry ... + +- ... Or with a different tag ... + +- ... We could use the following snippet: + +```bash + REGISTRY=dockercoins + TAG=v0.1 + for SERVICE in hasher rng webui worker; do + kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG + done +``` + +--- + +## Is this working? + +- After waiting for the deployment to complete, let's look at the logs! + + (Hint: use `kubectl get deploy -w` to watch deployment events) + +.exercise[ + + + +- Look at some logs: + ```bash + kubectl logs deploy/rng + kubectl logs deploy/worker + ``` + +] + +-- + +🤔 `rng` is fine ... But not `worker`. + +-- + +💡 Oh right! We forgot to `expose`. + +--- + +## Connecting containers together + +- Three deployments need to be reachable by others: `hasher`, `redis`, `rng` + +- `worker` doesn't need to be exposed + +- `webui` will be dealt with later + +.exercise[ + +- Expose each deployment, specifying the right port: + ```bash + kubectl expose deployment redis --port 6379 + kubectl expose deployment rng --port 80 + kubectl expose deployment hasher --port 80 + ``` + +] + +--- + +## Is this working yet? + +- The `worker` has an infinite loop, that retries 10 seconds after an error + +.exercise[ + +- Stream the worker's logs: + ```bash + kubectl logs deploy/worker --follow + ``` + + (Give it about 10 seconds to recover) + + + +] + +-- + +We should now see the `worker`, well, working happily. 
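
If you want to double-check that work is actually being recorded (and not just that the logs look healthy), you can read the counter straight from Redis. This is an optional sanity check; it assumes the stock DockerCoins worker keeps its tally in a Redis key named `hashes`, and that the redis pods carry the `app=redis` label that `kubectl create deployment` adds by default.

```bash
# Grab the name of a redis pod (kubectl create deployment labels pods with app=<name>)
REDIS_POD=$(kubectl get pods -l app=redis -o jsonpath='{.items[0].metadata.name}')

# Read the counter twice, a few seconds apart; the value should go up
kubectl exec $REDIS_POD -- redis-cli get hashes
sleep 5
kubectl exec $REDIS_POD -- redis-cli get hashes
```

If the number increases between the two reads, the whole pipeline (worker, rng, hasher, and redis) is doing its job.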
+ +--- + +## Exposing services for external access + +- Now we would like to access the Web UI + +- We will expose it with a `LoadBalancer` + +.exercise[ + +- Create a `LoadBalancer` service for the Web UI: + ```bash + kubectl expose deploy/webui --type=LoadBalancer --port=80 + ``` + +- Check the service that was created: + ```bash + kubectl get svc + ``` +] +-- + +Wait a few moments and rerun `kubectl get svc`; you should see the `EXTERNAL-IP` go from *pending* to an IP address. + +--- + +## Accessing the web UI + +- We can now connect to the `EXTERNAL-IP` of the allocated load balancer + +.exercise[ + +- Get the `EXTERNAL-IP`: + ```bash + LB=$(kubectl get svc webui -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + ``` + +- Open the web UI in your browser + ```bash + firefox http://$LB + ``` + +] diff --git a/slides/namespaced/sampleapp.md b/slides/namespaced/sampleapp.md new file mode 100644 index 000000000..e903e3e2c --- /dev/null +++ b/slides/namespaced/sampleapp.md @@ -0,0 +1,359 @@ +# Our sample application + +- We will clone the GitHub repository into our workspace + +> _If you were provided an SSH host, SSH to that first._ + +- The repository also contains scripts and tools that we will use throughout the workshop + +.exercise[ + + + +- Clone the repository: + ```bash + git clone https://@@GITREPO@@ + ``` + +] + +(You can also fork the repository on GitHub and clone your fork if you prefer that.) + +--- + +## Downloading and running the application + +Let's start this before we look around, as downloading will take a little time... + +> _Only do this if you were provided an SSH host, or your instructor says there is enough bandwidth._ + +.exercise[ + +- Go to the `dockercoins` directory, in the cloned repo: + ```bash + cd ~/container.training/dockercoins + ``` + +- Use Compose to build and run all containers: + ```bash + docker-compose up + ``` + + + +] + +Compose tells Docker to build all container images (pulling +the corresponding base images), then starts all containers, +and displays aggregated logs. + +--- + +## What's this application? + +-- + +- It is a DockerCoin miner! .emoji[💰🐳📦🚢] + +-- + +- No, you can't buy coffee with DockerCoins + +-- + +- How DockerCoins works: + + - generate a few random bytes + + - hash these bytes + + - increment a counter (to keep track of speed) + + - repeat forever!
+ +-- + +- DockerCoins is *not* a cryptocurrency + + (the only common points are "randomness," "hashing," and "coins" in the name) + +--- + +## DockerCoins in the microservices era + +- DockerCoins is made of 5 services: + + - `rng` = web service generating random bytes + + - `hasher` = web service computing hash of POSTed data + + - `worker` = background process calling `rng` and `hasher` + + - `webui` = web interface to watch progress + + - `redis` = data store (holds a counter updated by `worker`) + +- These 5 services are visible in the application's Compose file, + [docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) + +--- + +## How DockerCoins works + +- `worker` invokes web service `rng` to generate random bytes + +- `worker` invokes web service `hasher` to hash these bytes + +- `worker` does this in an infinite loop + +- every second, `worker` updates `redis` to indicate how many loops were done + +- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser + +*(See diagram on next slide!)* + +--- + +class: pic + +![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) + +--- + +## Service discovery in container-land + +How does each service find out the address of the other ones? + +-- + +- We do not hard-code IP addresses in the code + +- We do not hard-code FQDNs in the code, either + +- We just connect to a service name, and container-magic does the rest + + (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") + +--- + +## Example in `worker/worker.py` + +```python +redis = Redis("`redis`") + + +def get_random_bytes(): + r = requests.get("http://`rng`/32") + return r.content + + +def hash_bytes(data): + r = requests.post("http://`hasher`/", + data=data, + headers={"Content-Type": "application/octet-stream"}) +``` + +(Full source code available [here]( +https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 +)) + +--- + +class: extra-details + +## Links, naming, and service discovery + +- Containers can have network aliases (resolvable through DNS) + +- Compose file version 2+ makes each container reachable through its service name + +- Compose file version 1 required "links" sections to accomplish this + +- Network aliases are automatically namespaced + + - you can have multiple apps declaring and using a service named `database` + + - containers in the blue app will resolve `database` to the IP of the blue database + + - containers in the green app will resolve `database` to the IP of the green database + +--- + +## Show me the code! + +- You can check the GitHub repository with all the materials of this workshop: +
https://@@GITREPO@@ + +- The application is in the [dockercoins]( + https://@@GITREPO@@/tree/master/dockercoins) + subdirectory + +- The Compose file ([docker-compose.yml]( + https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) + lists all 5 services + +- `redis` is using an official image from the Docker Hub + +- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile + +- Each service's Dockerfile and source code is in its own directory + + (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, + `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) + directory, etc.) + +--- + +class: extra-details + +## Compose file format version + +*This is relevant only if you have used Compose before 2016...* + +- Compose 1.6 introduced support for a new Compose file format (aka "v2") + +- Services are no longer at the top level, but under a `services` section + +- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer) + +- Containers are placed on a dedicated network, making links unnecessary + +- There are other minor differences, but upgrade is easy and straightforward + +--- + +## Our application at work + +- On the left-hand side, the "rainbow strip" shows the container names + +- On the right-hand side, we see the output of our containers + +- We can see the `worker` service making requests to `rng` and `hasher` + +- For `rng` and `hasher`, we see HTTP access logs + +--- + +## Connecting to the web UI + +- "Logs are exciting and fun!" (No-one, ever) + +- The `webui` container exposes a web dashboard; let's view it + +.exercise[ + +- With a web browser, connect to `localhost` on port 8000 + +- Remember: the `nodeX` aliases are valid only on the nodes themselves + +- In your browser, you need to enter the IP address of your node + + + +] + +A drawing area should show up, and after a few seconds, a blue +graph will appear. + +--- + +class: self-paced, extra-details + +## If the graph doesn't load + +If you just see a `Page not found` error, it might be because your +Docker Engine is running on a different machine. This can be the case if: + +- you are using the Docker Toolbox + +- you are using a VM (local or remote) created with Docker Machine + +- you are controlling a remote Docker Engine + +When you run DockerCoins in development mode, the web UI static files +are mapped to the container using a volume. Alas, volumes can only +work on a local environment, or when using Docker Desktop for Mac or Windows. + +How to fix this? + +Stop the app with `^C`, edit `dockercoins.yml`, comment out the `volumes` section, and try again. + +--- + +class: extra-details + +## Why does the speed seem irregular? + +- It *looks like* the speed is approximately 4 hashes/second + +- Or more precisely: 4 hashes/second, with regular dips down to zero + +- Why? + +-- + +class: extra-details + +- The app actually has a constant, steady speed: 3.33 hashes/second +
+ (which corresponds to 1 hash every 0.3 seconds, for *reasons*) + +- Yes, and? + +--- + +class: extra-details + +## The reason why this graph is *not awesome* + +- The worker doesn't update the counter after every loop, but up to once per second + +- The speed is computed by the browser, checking the counter about once per second + +- Between two consecutive updates, the counter will increase either by 4, or by 0 + +- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - 0 etc. + +- What can we conclude from this? + +-- + +class: extra-details + +- "I'm clearly incapable of writing good frontend code!" 😀 — Jérôme + +--- + +## Stopping the application + +- If we interrupt Compose (with `^C`), it will politely ask the Docker Engine to stop the app + +- The Docker Engine will send a `TERM` signal to the containers + +- If the containers do not exit in a timely manner, the Engine sends a `KILL` signal + +.exercise[ + +- Stop the application by hitting `^C` + + + +] + +-- + +Some containers exit immediately, others take longer. + +The containers that do not handle `SIGTERM` end up being killed after a 10s timeout. If we are very impatient, we can hit `^C` a second time! diff --git a/slides/shared/handson.md b/slides/shared/handson.md new file mode 100644 index 000000000..6fa09ab61 --- /dev/null +++ b/slides/shared/handson.md @@ -0,0 +1,133 @@ +## Hands-on sections + +- The whole workshop is hands-on + +- We are going to build, ship, and run containers! + +- You are invited to reproduce all the demos + +- All hands-on sections are clearly identified, like the gray rectangle below + +.exercise[ + +- This is the stuff you're supposed to do! + +- Go to @@SLIDES@@ to view these slides + + + +] + +--- + +class: in-person + +## Where are we going to run our containers? + +--- + +class: in-person, pic + +![You get a cluster](images/you-get-a-cluster.jpg) + +--- + +class: in-person + +## You get a cluster of cloud VMs + +- Each person gets a private cluster of cloud VMs (not shared with anybody else) + +- They'll remain up for the duration of the workshop + +- You should have a little card with login+password+IP addresses + +- You can automatically SSH from one VM to another + +- The nodes have aliases: `node1`, `node2`, etc. + +--- + +class: in-person + +## Why don't we run containers locally? + +- Installing this stuff can be hard on some machines + + (32 bits CPU or OS... Laptops without administrator access... etc.) + +- *"The whole team downloaded all these container images from the WiFi! +
... and it went great!"* (Literally no-one ever) + +- All you need is a computer (or even a phone or tablet!), with: + + - an internet connection + + - a web browser + + - an SSH client + +--- + +class: in-person + +## SSH clients + +- On Linux, OS X, FreeBSD... you are probably all set + +- On Windows, get one of these: + + - [putty](http://www.putty.org/) + - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) + - [Git BASH](https://git-for-windows.github.io/) + - [MobaXterm](http://mobaxterm.mobatek.net/) + +- On Android, [JuiceSSH](https://juicessh.com/) + ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) + works pretty well + +- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets + +--- + +class: in-person, extra-details + +## What is this Mosh thing? + +*You don't have to use Mosh or even know about it to follow along. +
+We're just telling you about it because some of us think it's cool!* + +- Mosh is "the mobile shell" + +- It is essentially SSH over UDP, with roaming features + +- It retransmits packets quickly, so it works great even on lossy connections + + (Like hotel or conference WiFi) + +- It has intelligent local echo, so it works great even in high-latency connections + + (Like hotel or conference WiFi) + +- It supports transparent roaming when your client IP address changes + + (Like when you hop from hotel to conference WiFi) + +--- + +class: in-person, extra-details + +## Using Mosh + +- To install it: `(apt|yum|brew) install mosh` + +- It has been pre-installed on the VMs that we are using + +- To connect to a remote machine: `mosh user@host` + + (It is going to establish an SSH connection, then hand off to UDP) + +- It requires UDP ports to be open + + (By default, it uses a UDP port between 60000 and 61000) diff --git a/slides/shared/prereqs.md b/slides/shared/prereqs.md index 78c9b0795..3c3824dde 100644 --- a/slides/shared/prereqs.md +++ b/slides/shared/prereqs.md @@ -31,139 +31,3 @@ class: title Misattributed to Benjamin Franklin [(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/) - ---- - -## Hands-on sections - -- The whole workshop is hands-on - -- We are going to build, ship, and run containers! - -- You are invited to reproduce all the demos - -- All hands-on sections are clearly identified, like the gray rectangle below - -.exercise[ - -- This is the stuff you're supposed to do! - -- Go to @@SLIDES@@ to view these slides - - - -] - ---- - -class: in-person - -## Where are we going to run our containers? - ---- - -class: in-person, pic - -![You get a cluster](images/you-get-a-cluster.jpg) - ---- - -class: in-person - -## You get a cluster of cloud VMs - -- Each person gets a private cluster of cloud VMs (not shared with anybody else) - -- They'll remain up for the duration of the workshop - -- You should have a little card with login+password+IP addresses - -- You can automatically SSH from one VM to another - -- The nodes have aliases: `node1`, `node2`, etc. - ---- - -class: in-person - -## Why don't we run containers locally? - -- Installing this stuff can be hard on some machines - - (32 bits CPU or OS... Laptops without administrator access... etc.) - -- *"The whole team downloaded all these container images from the WiFi! -
... and it went great!"* (Literally no-one ever) - -- All you need is a computer (or even a phone or tablet!), with: - - - an internet connection - - - a web browser - - - an SSH client - ---- - -class: in-person - -## SSH clients - -- On Linux, OS X, FreeBSD... you are probably all set - -- On Windows, get one of these: - - - [putty](http://www.putty.org/) - - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH) - - [Git BASH](https://git-for-windows.github.io/) - - [MobaXterm](http://mobaxterm.mobatek.net/) - -- On Android, [JuiceSSH](https://juicessh.com/) - ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh)) - works pretty well - -- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets - ---- - -class: in-person, extra-details - -## What is this Mosh thing? - -*You don't have to use Mosh or even know about it to follow along. -
-We're just telling you about it because some of us think it's cool!* - -- Mosh is "the mobile shell" - -- It is essentially SSH over UDP, with roaming features - -- It retransmits packets quickly, so it works great even on lossy connections - - (Like hotel or conference WiFi) - -- It has intelligent local echo, so it works great even in high-latency connections - - (Like hotel or conference WiFi) - -- It supports transparent roaming when your client IP address changes - - (Like when you hop from hotel to conference WiFi) - ---- - -class: in-person, extra-details - -## Using Mosh - -- To install it: `(apt|yum|brew) install mosh` - -- It has been pre-installed on the VMs that we are using - -- To connect to a remote machine: `mosh user@host` - - (It is going to establish an SSH connection, then hand off to UDP) - -- It requires UDP ports to be open - - (By default, it uses a UDP port between 60000 and 61000) diff --git a/slides/spring-one-tour.yml b/slides/spring-one-tour.yml.no similarity index 100% rename from slides/spring-one-tour.yml rename to slides/spring-one-tour.yml.no From 7c368386ec7ef3721f4faa2f2ebb8db2feca9d83 Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Thu, 30 Apr 2020 12:59:02 -0500 Subject: [PATCH 13/14] remove all pks references Signed-off-by: Paul Czarkowski --- slides/kube-fullday-namespaced-pks.yml.no | 89 ---- slides/pks/cleanup-dockercoins.md | 12 - slides/pks/concepts-k8s.md | 257 --------- slides/pks/connecting.md | 106 ---- slides/pks/dashboard.md | 116 ---- slides/pks/helm-intro.md | 346 ------------ slides/pks/helm-wordpress.md | 109 ---- slides/pks/helm.md | 217 -------- slides/pks/httpenv-update.md | 387 -------------- slides/pks/ingress.md | 247 --------- slides/pks/kubectlexpose.md | 398 -------------- slides/pks/kubectlget.md | 359 ------------- slides/pks/kubectlrun.md | 614 ---------------------- slides/pks/kubercoins.md | 244 --------- slides/pks/logistics.md | 11 - slides/pks/logs-centralized.md | 147 ------ slides/pks/octant.md | 17 - slides/pks/ourapponkube.md | 139 ----- slides/pks/prereqs.md | 114 ---- slides/pks/sampleapp.md | 145 ----- slides/pks/scalingdockercoins.md | 241 --------- slides/pks/security-kubectl-apply.md | 52 -- slides/pks/setup-k8s.md | 108 ---- slides/pks/title.md | 23 - slides/pks/wp/values.yaml | 18 - slides/spring-one-tour.yml.no | 62 --- 26 files changed, 4578 deletions(-) delete mode 100644 slides/kube-fullday-namespaced-pks.yml.no delete mode 100644 slides/pks/cleanup-dockercoins.md delete mode 100644 slides/pks/concepts-k8s.md delete mode 100644 slides/pks/connecting.md delete mode 100644 slides/pks/dashboard.md delete mode 100644 slides/pks/helm-intro.md delete mode 100644 slides/pks/helm-wordpress.md delete mode 100644 slides/pks/helm.md delete mode 100644 slides/pks/httpenv-update.md delete mode 100644 slides/pks/ingress.md delete mode 100644 slides/pks/kubectlexpose.md delete mode 100644 slides/pks/kubectlget.md delete mode 100644 slides/pks/kubectlrun.md delete mode 100644 slides/pks/kubercoins.md delete mode 100644 slides/pks/logistics.md delete mode 100644 slides/pks/logs-centralized.md delete mode 100644 slides/pks/octant.md delete mode 100644 slides/pks/ourapponkube.md delete mode 100644 slides/pks/prereqs.md delete mode 100644 slides/pks/sampleapp.md delete mode 100644 slides/pks/scalingdockercoins.md delete mode 100644 slides/pks/security-kubectl-apply.md delete mode 100644 slides/pks/setup-k8s.md delete mode 100644 slides/pks/title.md delete mode 100644 slides/pks/wp/values.yaml delete mode 100644 
slides/spring-one-tour.yml.no diff --git a/slides/kube-fullday-namespaced-pks.yml.no b/slides/kube-fullday-namespaced-pks.yml.no deleted file mode 100644 index 4dd0fb854..000000000 --- a/slides/kube-fullday-namespaced-pks.yml.no +++ /dev/null @@ -1,89 +0,0 @@ -title: | - Config Management Camp - Kubernetes Workshop - -#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" -#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)" -chat: "In person!" - -gitrepo: github.com/paulczar/container.training - -slides: https://k8s.camp/cfgcamp/ - -exclude: -- self-paced - -content: -- pks/title.md -- pks/logistics.md -- k8s/intro.md -- shared/about-slides.md -- shared/toc.md -- - - pks/prereqs.md - #- shared/webssh.md - - pks/connecting.md - # - k8s/versions-k8s.md - #- shared/sampleapp.md - #- shared/composescale.md - #- shared/hastyconclusions.md - #- shared/composedown.md -- - - k8s/kubectlrun.md - - k8s/logs-cli.md - - shared/declarative.md - - k8s/declarative.md - - k8s/deploymentslideshow.md - - k8s/kubenet.md - - pks/kubectlexpose.md - - k8s/shippingimages.md - #- k8s/buildshiprun-selfhosted.md - - k8s/buildshiprun-dockerhub.md - - pks/ourapponkube.md - #- k8s/kubectlproxy.md - #- k8s/localkubeconfig.md - #- k8s/accessinternal.md -- - - pks/setup-k8s.md - - pks/dashboard.md - - pks/octant.md - #- k8s/kubectlscale.md - - pks/scalingdockercoins.md - - shared/hastyconclusions.md - - k8s/daemonset.md - - k8s/rollout.md - #- k8s/healthchecks.md - #- k8s/healthchecks-more.md - #- k8s/record.md -- - #- k8s/namespaces.md - - pks/ingress.md - - pks/cleanup-dockercoins.md - #- k8s/kustomize.md - #- k8s/helm.md - #- k8s/create-chart.md - #- k8s/netpol.md - #- k8s/authn-authz.md - #- k8s/csr-api.md - #- k8s/openid-connect.md - #- k8s/podsecuritypolicy.md - - k8s/volumes.md - #- k8s/build-with-docker.md - #- k8s/build-with-kaniko.md - - k8s/configuration.md - #- k8s/logs-centralized.md - #- k8s/prometheus.md - #- k8s/statefulsets.md - #- k8s/local-persistent-volumes.md - #- k8s/portworx.md - #- k8s/extending-api.md - #- k8s/operators.md - #- k8s/operators-design.md - #- k8s/staticpods.md - #- k8s/owners-and-dependents.md - #- k8s/gitworkflows.md - - pks/helm.md - - pks/helm-wordpress.md -- - - k8s/whatsnext.md - - k8s/links.md - - shared/thankyou.md diff --git a/slides/pks/cleanup-dockercoins.md b/slides/pks/cleanup-dockercoins.md deleted file mode 100644 index 8fa5bce1a..000000000 --- a/slides/pks/cleanup-dockercoins.md +++ /dev/null @@ -1,12 +0,0 @@ -# Let's do some housekeeping - -- We've created a lot of resources, let's clean them up. - -.exercise[ - - Delete resources: - ```bash - kubectl delete deployment,svc hasher redis rng webui - kubectl delete deployment worker - kubectl delete ingress webui - kubectl delete daemonset rng -] diff --git a/slides/pks/concepts-k8s.md b/slides/pks/concepts-k8s.md deleted file mode 100644 index edb6f1380..000000000 --- a/slides/pks/concepts-k8s.md +++ /dev/null @@ -1,257 +0,0 @@ -# Kubernetes concepts - -- Kubernetes is a container management system - -- It runs and manages containerized applications on a cluster - --- - -- What does that really mean? 
- ---- - -## Basic things we can ask Kubernetes to do - --- - -- Start 5 containers using image `atseashop/api:v1.3` - --- - -- Place an internal load balancer in front of these containers - --- - -- Start 10 containers using image `atseashop/webfront:v1.3` - --- - -- Place a public load balancer in front of these containers - --- - -- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers - --- - -- New release! Replace my containers with the new image `atseashop/webfront:v1.4` - --- - -- Keep processing requests during the upgrade; update my containers one at a time - ---- - -## Other things that Kubernetes can do for us - -- Basic autoscaling - -- Blue/green deployment, canary deployment - -- Long running services, but also batch (one-off) jobs - -- Overcommit our cluster and *evict* low-priority jobs - -- Run services with *stateful* data (databases etc.) - -- Fine-grained access control defining *what* can be done by *whom* on *which* resources - -- Integrating third party services (*service catalog*) - -- Automating complex tasks (*operators*) - ---- - -## Kubernetes architecture - ---- - -class: pic - -![haha only kidding](images/k8s-arch1.png) - ---- - -## Kubernetes architecture - -- Ha ha ha ha - -- OK, I was trying to scare you, it's much simpler than that ❤️ - ---- - -class: pic - -![that one is more like the real thing](images/k8s-arch2.png) - ---- - -## Credits - -- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI - - (Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/)) - -- The second one is a simplified representation of a Kubernetes cluster - - (Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e)) - ---- - -## Kubernetes architecture: the data plane - -- The data plane is a collection of nodes that execute our containers - -- These nodes run a collection of services: - - - a container Engine (typically Docker) - - - kubelet (the "node agent") - - - kube-proxy (a necessary but not sufficient network component) - -- Nodes were formerly called "minions" - - (You might see that word in older articles or documentation) - ---- - -## Kubernetes architecture: the control plane - -- The Kubernetes logic (its "brains") is a collection of services: - - - the API server (our point of entry to everything!) - - - core services like the scheduler and controller manager - - - `etcd` (a highly available key/value store; the "database" of Kubernetes) - -- Together, these services form the control plane of our cluster - -- The control plane is also called the "master" - ---- - -class: pic - -![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png) - ---- - -class: extra-details - -## Running the control plane on special nodes - -- PKS reserves dedicated node[s] for the control plane - -- This node is then called a "master" - - (Yes, this is ambiguous: is the "master" a node, or the whole control plane?) - -- Normal applications are restricted from running on this node - -- When high availability is required, each service of the control plane must be resilient - -- The control plane is then replicated on multiple nodes - - (This is sometimes called a "multi-master" setup) - ---- - -class: extra-details - -## Do we need to run Docker at all? - -No! 
- --- - -- By default, Kubernetes uses the Docker Engine to run containers - -- We could also use `rkt` ("Rocket") from CoreOS - -- Or leverage other pluggable runtimes through the *Container Runtime Interface* - - (like CRI-O, or containerd) - ---- - -class: extra-details - -## Do we need to run Docker at all? - -Yes! - --- - -- Our Kubernetes cluster is using Docker as the container engine - -- We still use it to build images and ship them around - -- We can do these things without Docker -
- (and get diagnosed with NIH¹ syndrome) - -- Docker is still the most stable container engine today -
- (but other options are maturing very quickly) - -.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)] - ---- - -class: extra-details - -## Do we need to run Docker at all? - -- On our development environments, CI pipelines ... : - - *Yes, almost certainly* - -- On our production servers: - - *Yes (today)* - - *Probably not (in the future)* - -.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)] - ---- - -## Interacting with Kubernetes - -- We will interact with our Kubernetes cluster through the Kubernetes API - -- The Kubernetes API is (mostly) RESTful - -- It allows us to create, read, update, delete *resources* - -- A few common resource types are: - - - node (a machine — physical or virtual — in our cluster) - - - pod (group of containers running together on a node) - - - service (stable network endpoint to connect to one or multiple containers) - ---- - -class: pic - -![Node, pod, container](images/k8s-arch3-thanks-weave.png) - ---- - -## Credits - -- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha) - - - it's one of the best Kubernetes architecture diagrams available! - -- The second diagram is courtesy of Weave Works - - - a *pod* can have multiple containers working together - - - IP addresses are associated with *pods*, not with individual containers - -Both diagrams used with permission. diff --git a/slides/pks/connecting.md b/slides/pks/connecting.md deleted file mode 100644 index 810bf5d37..000000000 --- a/slides/pks/connecting.md +++ /dev/null @@ -1,106 +0,0 @@ -class: in-person - -## Connecting to our lab environment - -.exercise[ - -- Log into https://gangway.workshop.paulczar.wtf with your provided credentials. - -- Follow the instructions on the auth portal to set up a `kubeconfig` file. - -- Check that you can connect to the cluster with `kubectl cluster-info`: - -```bash -$ kubectl cluster-info -Kubernetes master is running at https://k8s.cluster1.demo.paulczar.wtf:8443 -CoreDNS is running at https://k8s.cluster1.demo.paulczar.wtf:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy -``` -] - -If anything goes wrong — ask for help! - ---- - -## Role Based Authorization Control - -You are restricted to a subset of Kubernetes resources in your own namespace. Just like in a real world enterprise cluster. - - -.exercise[ - -1\. Can you create pods? - -``` -$ kubectl auth can-i create pods -``` - -2\. Can you delete namespaces? - -``` -$ kubectl auth can-i delete namespaces -``` -] --- - -1. You can create pods in your own namespace. -2. You cannot delete namespaces. ---- - -## Doing or re-doing the workshop on your own? - -- Use something like - [Play-With-Docker](http://play-with-docker.com/) or - [Play-With-Kubernetes](https://training.play-with-kubernetes.com/) - - Zero setup effort; but environment are short-lived and - might have limited resources - -- Create your own cluster (local or cloud VMs) - - Small setup effort; small cost; flexible environments - -- Create a bunch of clusters for you and your friends - ([instructions](https://@@GITREPO@@/tree/master/prepare-vms)) - - Bigger setup effort; ideal for group training - ---- - -class: self-paced - -## Get your own Docker nodes - -- If you already have some Docker nodes: great! 
- -- If not: let's get some thanks to Play-With-Docker - -.exercise[ - -- Go to http://www.play-with-docker.com/ - -- Log in - -- Create your first node - - - -] - -You will need a Docker ID to use Play-With-Docker. - -(Creating a Docker ID is free.) - ---- - -## Terminals - -Once in a while, the instructions will say: -
"Open a new terminal." - -There are multiple ways to do this: - -- create a new window or tab on your machine, and SSH into the VM; - -- use screen or tmux on the VM and open a new window from there. - -You are welcome to use the method that you feel the most comfortable with. diff --git a/slides/pks/dashboard.md b/slides/pks/dashboard.md deleted file mode 100644 index 050204904..000000000 --- a/slides/pks/dashboard.md +++ /dev/null @@ -1,116 +0,0 @@ -# The Kubernetes dashboard - -- Kubernetes resources can be viewed with a web dashboard - -- That dashboard is usually exposed over HTTPS - - (this requires obtaining a proper TLS certificate) - -- Dashboard users need to authenticate - -- Most people just YOLO it into their cluster and then get hacked - ---- - -## Stop the madness - -You know what, this is all a very bad idea. Let's not run the Kubernetes dashboard at all ... ever. - -The following slides are informational. **Do not run them**. - ---- - -## The insecure method - -- We could (and should) use [Let's Encrypt](https://letsencrypt.org/) ... - -- ... but we don't want to deal with TLS certificates - -- We could (and should) learn how authentication and authorization work ... - -- ... but we will use a guest account with admin access instead - -.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]] - ---- - -## Running a very insecure dashboard - -- We are going to deploy that dashboard with *one single command* - -- This command will create all the necessary resources - - (the dashboard itself, the HTTP wrapper, the admin/guest account) - -- All these resources are defined in a YAML file - -- All we have to do is load that YAML file with with `kubectl apply -f` - -.exercise[ - -- Create all the dashboard resources, with the following command: - ```bash - kubectl apply -f ~/container.training/k8s/insecure-dashboard.yaml - ``` - -] - ---- - -## Connecting to the dashboard - -.exercise[ - -- Check which port the dashboard is on: - ```bash - kubectl get svc dashboard - ``` - -] - -You'll want the `3xxxx` port. - - -.exercise[ - -- Connect to http://oneofournodes:3xxxx/ - - - -] - -The dashboard will then ask you which authentication you want to use. - ---- - -## Dashboard authentication - -- We have three authentication options at this point: - - - token (associated with a role that has appropriate permissions) - - - kubeconfig (e.g. using the `~/.kube/config` file from `node1`) - - - "skip" (use the dashboard "service account") - -- Let's use "skip": we're logged in! - --- - -.warning[By the way, we just added a backdoor to our Kubernetes cluster!] - ---- - -## Running the Kubernetes dashboard securely - -- The steps that we just showed you are *for educational purposes only!* - -- If you do that on your production cluster, people [can and will abuse it](https://redlock.io/blog/cryptojacking-tesla) - -- For an in-depth discussion about securing the dashboard, -
- check [this excellent post on Heptio's blog](https://blog.heptio.com/on-securing-the-kubernetes-dashboard-16b09b1b7aca) - --- - -- Or better yet, don't use the dashboard. Use Octant. diff --git a/slides/pks/helm-intro.md b/slides/pks/helm-intro.md deleted file mode 100644 index e1b0bc869..000000000 --- a/slides/pks/helm-intro.md +++ /dev/null @@ -1,346 +0,0 @@ -# Managing stacks with Helm - -- We created our first resources with `kubectl run`, `kubectl expose` ... - -- We have also created resources by loading YAML files with `kubectl apply -f` - -- For larger stacks, managing thousands of lines of YAML is unreasonable - -- These YAML bundles need to be customized with variable parameters - - (E.g.: number of replicas, image version to use ...) - -- It would be nice to have an organized, versioned collection of bundles - -- It would be nice to be able to upgrade/rollback these bundles carefully - -- [Helm](https://helm.sh/) is an open source project offering all these things! - ---- - -## Helm concepts - -- `helm` is a CLI tool - -- It is used to find, install, upgrade *charts* - -- A chart is an archive containing templatized YAML bundles - -- Charts are versioned - -- Charts can be stored on private or public repositories - ---- - -## Differences between charts and packages - -- A package (deb, rpm...) contains binaries, libraries, etc. - -- A chart contains YAML manifests - - (the binaries, libraries, etc. are in the images referenced by the chart) - -- On most distributions, a package can only be installed once - - (installing another version replaces the installed one) - -- A chart can be installed multiple times - -- Each installation is called a *release* - -- This allows to install e.g. 10 instances of MongoDB - - (with potentially different versions and configurations) - ---- - -class: extra-details - -## Wait a minute ... - -*But, on my Debian system, I have Python 2 **and** Python 3. -
-Also, I have multiple versions of the Postgres database engine!* - -Yes! - -But they have different package names: - -- `python2.7`, `python3.8` - -- `postgresql-10`, `postgresql-11` - -Good to know: the Postgres package in Debian includes -provisions to deploy multiple Postgres servers on the -same system, but it's an exception (and it's a lot of -work done by the package maintainer, not by the `dpkg` -or `apt` tools). - ---- - -## Helm 2 vs Helm 3 - -- Helm 3 was released [November 13, 2019](https://helm.sh/blog/helm-3-released/) - -- Charts remain compatible between Helm 2 and Helm 3 - -- The CLI is very similar (with minor changes to some commands) - -- The main difference is that Helm 2 uses `tiller`, a server-side component - -- Helm 3 doesn't use `tiller` at all, making it simpler (yay!) - ---- - -class: extra-details - -## With or without `tiller` - -- With Helm 3: - - - the `helm` CLI communicates directly with the Kubernetes API - - - it creates resources (deployments, services...) with our credentials - -- With Helm 2: - - - the `helm` CLI communicates with `tiller`, telling `tiller` what to do - - - `tiller` then communicates with the Kubernetes API, using its own credentials - -- This indirect model caused significant permissions headaches - - (`tiller` required very broad permissions to function) - -- `tiller` was removed in Helm 3 to simplify the security aspects - ---- - -## Installing Helm - -- If the `helm` CLI is not installed in your environment, install it - -.exercise[ - -- Check if `helm` is installed: - ```bash - helm version - ``` - -- If it's not installed (or its helm 2), run the following command: - ```bash - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \ - | bash - ``` - -] - ---- - -## Charts and repositories - -- A *repository* (or repo in short) is a collection of charts - -- It's just a bunch of files - - (they can be hosted by a static HTTP server, or on a local directory) - -- We can add "repos" to Helm, giving them a nickname - -- The nickname is used when referring to charts on that repo - - (for instance, if we try to install `hello/world`, that - means the chart `world` on the repo `hello`; and that repo - `hello` might be something like https://blahblah.hello.io/charts/) - ---- - -## Managing repositories - -- Let's check what repositories we have, and add the `stable` repo - - (the `stable` repo contains a set of official-ish charts) - -.exercise[ - -- List our repos: - ```bash - helm repo list - ``` - -- Add the `stable` repo: - ```bash - helm repo add stable https://kubernetes-charts.storage.googleapis.com/ - ``` - -] - -Adding a repo can take a few seconds (it downloads the list of charts from the repo). - -It's OK to add a repo that already exists (it will merely update it). - ---- - -## Search available charts - -- We can search available charts with `helm search` - -- We need to specify where to search (only our repos, or Helm Hub) - -- Let's search for all charts mentioning tomcat! - -.exercise[ - -- Search for tomcat in the repo that we added earlier: - ```bash - helm search repo tomcat - ``` - -- Search for tomcat on the Helm Hub: - ```bash - helm search hub tomcat - ``` - -] - -[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server. 
- ---- - -## Charts and releases - -- "Installing a chart" means creating a *release* - -- We need to name that release - - (or use the `--generate-name` to get Helm to generate one for us) - -.exercise[ - -- Install the tomcat chart that we found earlier: - ```bash - helm install java4ever stable/tomcat - ``` - -- List the releases: - ```bash - helm list - ``` - -] - ---- - -## Viewing resources of a release - -- This specific chart labels all its resources with a `release` label - -- We can use a selector to see these resources - -.exercise[ - -- List all the resources created by this release: - ```bash - kubectl get all --selector=release=java4ever - ``` - -] - -Note: this `release` label wasn't added automatically by Helm. -
-It is defined in that chart. In other words, not all charts will provide this label. - ---- - -## Configuring a release - -- By default, `stable/tomcat` creates a service of type `LoadBalancer` - -- We would like to change that to a `NodePort` - -- We could use `kubectl edit service java4ever-tomcat`, but ... - - ... our changes would get overwritten next time we update that chart! - -- Instead, we are going to *set a value* - -- Values are parameters that the chart can use to change its behavior - -- Values have default values - -- Each chart is free to define its own values and their defaults - ---- - -## Checking possible values - -- We can inspect a chart with `helm show` or `helm inspect` - -.exercise[ - -- Look at the README for tomcat: - ```bash - helm show readme stable/tomcat - ``` - -- Look at the values and their defaults: - ```bash - helm show values stable/tomcat - ``` - -] - -The `values` may or may not have useful comments. - -The `readme` may or may not have (accurate) explanations for the values. - -(If we're unlucky, there won't be any indication about how to use the values!) - ---- - -## Setting values - -- Values can be set when installing a chart, or when upgrading it - -- We are going to update `java4ever` to change the type of the service - -.exercise[ - -- Update `java4ever`: - ```bash - helm upgrade java4ever stable/tomcat --set service.type=NodePort - ``` - -] - -Note that we have to specify the chart that we use (`stable/tomcat`), -even if we just want to update some values. - -We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values. - -All unspecified values will take the default values defined in the chart. - ---- - -## Connecting to tomcat - -- Let's check the tomcat server that we just installed - -- Note: its readiness probe has a 60s delay - - (so it will take 60s after the initial deployment before the service works) - -.exercise[ - -- Check the node port allocated to the service: - ```bash - kubectl get service java4ever-tomcat - PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort}) - ``` - -- Connect to it, checking the demo app on `/sample/`: - ```bash - curl localhost:$PORT/sample/ - ``` - -] diff --git a/slides/pks/helm-wordpress.md b/slides/pks/helm-wordpress.md deleted file mode 100644 index 882a68da3..000000000 --- a/slides/pks/helm-wordpress.md +++ /dev/null @@ -1,109 +0,0 @@ -## Why wordpress its 2019?!?! - -I know ... funny right :) - ---- - -## Helm install notes - -- You'll notice a helpful message after running `helm install` that looks something like this: - -``` -NOTES: -1. Get the WordPress URL: - - echo "WordPress URL: http://127.0.0.1:8080/" - echo "WordPress Admin URL: http://127.0.0.1:8080/admin" - kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 - -2. Login with the following credentials to see your blog - - echo Username: user - echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) -``` - --- - -Helm charts generally have a `NOTES.txt` template that is rendered out and displayed after helm commands are run. Pretty neat. - ---- - -## What did helm install ? - -- Run `kubectl get all` to check what resources helm installed - -.exercise[ - - Run `kubectl get all`: - ```bash - kubectl get all - ``` - -] ---- - -## What did helm install ? 
- -``` -NAME READY STATUS RESTARTS AGE -pod/wp-mariadb-0 1/1 Running 0 11m -pod/wp-wordpress-6cb9cfc94-chbr6 1/1 Running 0 11m - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/wp-mariadb ClusterIP 10.100.200.87 3306/TCP 11m -service/wp-wordpress ClusterIP 10.100.200.131 80/TCP,443/TCP 11m - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/wp-wordpress 1/1 1 1 11m - -NAME DESIRED CURRENT READY AGE -replicaset.apps/tiller-deploy-6487f7bfd8 1 1 1 2d6h -replicaset.apps/tiller-deploy-75ccf68856 0 0 0 2d6h -replicaset.apps/wp-wordpress-6cb9cfc94 1 1 1 11m - -NAME READY AGE -statefulset.apps/wp-mariadb 1/1 11m - -``` - ---- - -## Check if wordpress is working - -- Using the notes provided from helm check you can access your wordpress and login as `user` - -.exercise[ - - run the commands provided by the helm summary: - ```bash - echo Username: user - echo Password: $(kubectl get secret --namespace user1 wp-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) - - kubectl port-forward --namespace user1 svc/wp-wordpress 8080:80 - ``` -] - --- - -Yay? you have a 2003 era blog - ---- - -## Helm Chart Values - -Settings values on the command line is okay for a demonstration, but we should really be creating a `~/workshop/values.yaml` file for our chart. Let's do that now. - -> the values file is a bit long to copy/paste from here, so lets wget it. - -.exercise[ - - Download the values.yaml file and edit it, changing the URL prefix to be `-wp`: - ```bash - wget -O ~/workshop/values.yaml \ - https://raw.githubusercontent.com/paulczar/container.training/pks/slides/pks/wp/values.yaml - - vim ~/workshop/values.yaml - - helm upgrade wp stable/wordpress -f ~/workshop/values.yaml - - ``` -] - ---- \ No newline at end of file diff --git a/slides/pks/helm.md b/slides/pks/helm.md deleted file mode 100644 index 51ab4e951..000000000 --- a/slides/pks/helm.md +++ /dev/null @@ -1,217 +0,0 @@ -# Managing stacks with Helm - -- We created our first resources with `kubectl run`, `kubectl expose` ... - -- We have also created resources by loading YAML files with `kubectl apply -f` - -- For larger stacks, managing thousands of lines of YAML is unreasonable - -- These YAML bundles need to be customized with variable parameters - - (E.g.: number of replicas, image version to use ...) - -- It would be nice to have an organized, versioned collection of bundles - -- It would be nice to be able to upgrade/rollback these bundles carefully - -- [Helm](https://helm.sh/) is an open source project offering all these things! - ---- - -## Helm concepts - -- `helm` is a CLI tool - -- `tiller` is its companion server-side component - -- A "chart" is an archive containing templatized YAML bundles - -- Charts are versioned - -- Charts can be stored on private or public repositories - --- - -*We're going to use the beta of Helm 3 as it does not require `tiller` making things simpler and more secure for us.* ---- - -## Installing Helm - -- If the `helm` 3 CLI is not installed in your environment, [install it](https://github.com/helm/helm/releases/tag/v3.0.0-beta.1) - -.exercise[ - -- Check if `helm` is installed: - ```bash - helm version - ``` -] - --- - -```bash -version.BuildInfo{Version:"v3.0.0-beta.1", GitCommit:"f76b5f21adb53a85de8925f4a9d4f9bd99f185b5", GitTreeState:"clean", GoVersion:"go1.12.9"}` -``` - ---- - -## Oops you accidently a Helm 2 - -If `helm version` gives you a result like below it means you have helm 2 which requires the `tiller` server side component. 
- -``` -Client: &version.Version{SemVer:"v2.14.0", GitCommit:"05811b84a3f93603dd6c2fcfe57944dfa7ab7fd0", GitTreeState:"clean"} -Error: forwarding ports: error upgrading connection: pods "tiller-deploy-6fd87785-x8sxk" is forbidden: User "user1" cannot create resource "pods/portforward" in API group "" in the namespace "kube-system" -``` - -Run `EXPORT TILLER_NAMESPACE=` and try again. We've pre-installed `tiller` for you in your namespace just in case. - --- - -Some of the commands in the following may not work in helm 2. Good luck! - ---- - -## Installing Tiller - -*If you were running Helm 2 you would need to install Tiller. We can skip this.* - -- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace - -- They can be managed (installed, upgraded...) with the `helm` CLI - -.exercise[ - -- Deploy Tiller: - ```bash - helm init - ``` - -] - -If Tiller was already installed, don't worry: this won't break it. - -At the end of the install process, you will see: - -``` -Happy Helming! -``` - ---- - -## Fix account permissions - -*If you were running Helm 2 you would need to install Tiller. We can skip this.* - -- Helm permission model requires us to tweak permissions - -- In a more realistic deployment, you might create per-user or per-team - service accounts, roles, and role bindings - -.exercise[ - -- Grant `cluster-admin` role to `kube-system:default` service account: - ```bash - kubectl create clusterrolebinding add-on-cluster-admin \ - --clusterrole=cluster-admin --serviceaccount=kube-system:default - ``` - -] - -(Defining the exact roles and permissions on your cluster requires -a deeper knowledge of Kubernetes' RBAC model. The command above is -fine for personal and development clusters.) - ---- - -## View available charts - -- A public repo is pre-configured when installing Helm - -- We can view available charts with `helm search` (and an optional keyword) - -.exercise[ - -- View all available charts: - ```bash - helm search hub - ``` - -- View charts related to `prometheus`: - ```bash - helm search hub prometheus - ``` - -] - ---- - -## Add the stable chart repository - -- Helm 3 does not come configured with any repositories, so we need to start by adding the stable repo. - -.exercise[ - - Add the stable repo - ```bash - helm repo add stable https://kubernetes-charts.storage.googleapis.com/ - helm repo update - ``` -] - ---- - -## Install a chart - -- Most charts use `LoadBalancer` service types by default - -- Most charts require persistent volumes to store data - -- We can relax these requirements a bit - -.exercise[ - -- Install on our cluster: - ```bash - helm install wp stable/wordpress \ - --set service.type=ClusterIP \ - --set persistence.enabled=false \ - --set mariadb.master.persistence.enabled=false - ``` -] - - -Where do these `--set` options come from? - ---- - -## Inspecting a chart - -- `helm inspect` shows details about a chart (including available options) - -.exercise[ - -- See the metadata and all available options for `stable/wordpress`: - ```bash - helm inspect stable/wordpress - ``` - -] - -The chart's metadata includes a URL to the project's home page. - -(Sometimes it conveniently points to the documentation for the chart.) 
- ---- - -## Viewing installed charts - -- Helm keeps track of what we've installed - -.exercise[ - -- List installed Helm charts: - ```bash - helm list - ``` - -] diff --git a/slides/pks/httpenv-update.md b/slides/pks/httpenv-update.md deleted file mode 100644 index de8ea6b84..000000000 --- a/slides/pks/httpenv-update.md +++ /dev/null @@ -1,387 +0,0 @@ -# Rolling updates - -- By default (without rolling updates), when a scaled resource is updated: - - - new pods are created - - - old pods are terminated - - - ... all at the same time - - - if something goes wrong, ¯\\\_(ツ)\_/¯ - ---- - -## Rolling updates - -- With rolling updates, when a Deployment is updated, it happens progressively - -- The Deployment controls multiple Replica Sets - -- Each Replica Set is a group of identical Pods - - (with the same image, arguments, parameters ...) - -- During the rolling update, we have at least two Replica Sets: - - - the "new" set (corresponding to the "target" version) - - - at least one "old" set - -- We can have multiple "old" sets - - (if we start another update before the first one is done) - ---- - -## Update strategy - -- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge` - -- They can be specified in absolute number of pods, or percentage of the `replicas` count - -- At any given time ... - - - there will always be at least `replicas`-`maxUnavailable` pods available - - - there will never be more than `replicas`+`maxSurge` pods in total - - - there will therefore be up to `maxUnavailable`+`maxSurge` pods being updated - -- We have the possibility of rolling back to the previous version -
(if the update fails or is unsatisfactory in any way) - ---- - -## Checking current rollout parameters - -- Recall how we build custom reports with `kubectl` and `jq`: - -.exercise[ - -- Show the rollout plan for our deployments: - ```bash - kubectl get deploy -o json | - jq ".items[] | {name:.metadata.name} + .spec.strategy.rollingUpdate" - ``` - -] - ---- - -## Rolling updates in practice - -- As of Kubernetes 1.8, we can do rolling updates with: - - `deployments`, `daemonsets`, `statefulsets` - -- Editing one of these resources will automatically result in a rolling update - -- Rolling updates can be monitored with the `kubectl rollout` subcommand - ---- - -## Rolling out the new `worker` service - -.exercise[ - -- Let's monitor what's going on by opening a few terminals, and run: - ```bash - kubectl get pods -w - kubectl get replicasets -w - kubectl get deployments -w - ``` - - - -- Update `httpenv` either with `kubectl edit`, or by running: - ```bash - kubectl set env -e "hello=world" deployment httpenv - ``` -] --- - - -Deployments treat environment variable changes as a upgrade. You should see the rollout occur. - ---- - -## Verify rollout - -- Remember our `httpenv` app prints out our env variables... - -.exercise[ - -- get the IP of the service: - ```bash - IP=`kubectl get svc httpenv \ - -o jsonpath="{.status.loadBalancer.ingress[*].ip}"` - echo $IP - ``` - -- check the app now shows this new environment variable: - - ```bash - curl $IP:8888 - ``` - or - ```bash - curl -s $IP:8888 | jq .hello - ``` -] - --- - -"hello": "world" - ---- - -## Rolling out something invalid - -- What happens if we make a mistake? - -.exercise[ - -- Update `httpenv` by specifying a non-existent image: - ```bash - kubectl set image deploy httpenv httpenv=not-a-real-image - ``` - -- Check what's going on: - ```bash - kubectl rollout status deploy httpenv - ``` - - - -] - --- - -Our rollout is stuck. However, the app is not dead. - ---- - -## What's going on with our rollout? - -- Let's look at our app: - -.exercise[ - - - Check our pods: - ```bash - kubectl get pods - ``` -] - --- - -We have 8 running pods, and 5 failing pods. - ---- - -Why do we have 8 running pods? we should have 10 - -- Because `MaxUnavailable=25%` - - ... So the rollout terminated 2 replicas out of 10 available - -- Okay, but why do we see 5 new replicas being rolled out? - -- Because `MaxSurge=25%` - - ... So in addition to replacing 2 replicas, the rollout is also starting 3 more - -- It rounded down the number of MaxUnavailable pods conservatively, -
- but the total number of pods being rolled out is allowed to be 25+25=50% - ---- - -class: extra-details - -## The nitty-gritty details - -- We start with 10 pods running for the `httpenv` deployment - -- Current settings: MaxUnavailable=25% and MaxSurge=25% - -- When we start the rollout: - - - two replicas are taken down (as per MaxUnavailable=25%) - - two others are created (with the new version) to replace them - - three others are created (with the new version) per MaxSurge=25%) - -- Now we have 8 replicas up and running, and 5 being deployed - -- Our rollout is stuck at this point! - ---- - -## Recovering from a bad rollout - -- We could push the missing image to our registry - - (the pod retry logic will eventually catch it and the rollout will proceed) - -- Or we could invoke a manual rollback - -.exercise[ - - - -- Cancel the deployment and wait for the dust to settle: - ```bash - kubectl rollout undo deploy httpenv - kubectl rollout status deploy httpenv - ``` - -] - ---- - -## Rolling back to an older version - -- We reverted to our original working image :) - -- We have 10 replicas running again. - ---- - -## Multiple "undos" - -- What happens if we try `kubectl rollout undo` again? - -.exercise[ - -- Try it: - ```bash - kubectl rollout undo deployment httpenv - ``` - -- Check the web UI, the list of pods ... - -] - -🤔 That didn't work. - ---- - -## Multiple "undos" don't work - -- If we see successive versions as a stack: - - - `kubectl rollout undo` doesn't "pop" the last element from the stack - - - it copies the N-1th element to the top - -- Multiple "undos" just swap back and forth between the last two versions! - -.exercise[ - -- Go back to the original version again: - ```bash - kubectl rollout undo deployment httpenv - ``` -] - ---- - -## Listing versions - -- We can list successive versions of a Deployment with `kubectl rollout history` - -.exercise[ - -- Look at our successive versions: - ```bash - kubectl rollout history deployment httpenv - ``` - -] - -We don't see *all* revisions. - -We might see something like 1, 4, 5. - -(Depending on how many "undos" we did before.) - ---- - -## Explaining deployment revisions - -- These revisions correspond to our Replica Sets - -- This information is stored in the Replica Set annotations - -.exercise[ - -- Check the annotations for our replica sets: - ```bash - kubectl describe replicasets -l app=httpenv | grep -A3 ^Annotations - ``` - -] - ---- - -class: extra-details - -## What about the missing revisions? - -- The missing revisions are stored in another annotation: - - `deployment.kubernetes.io/revision-history` - -- These are not shown in `kubectl rollout history` - -- We could easily reconstruct the full list with a script - - (if we wanted to!) - ---- - -## Rolling back to an older version - -- `kubectl rollout undo` can work with a revision number - -.exercise[ - -- Roll back to the "known good" deployment version: - ```bash - kubectl rollout undo deployment httpenv --to-revision=1 - ``` - -- Check the web UI via curl again - ```bash - curl $IP:8888 - ``` --- - -the `hello world` environment variable has gone as we're right back to the original revision of our application. - -] - ---- - -## Cleanup - -.exercise[ - -- Delete all of the deployments, services, and cronjobs: - - ```bash - kubectl delete deployments,cronjobs,services --all - ``` - -] - --- - -Using `--all` on a delete is really destructive, be very careful with it. 
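-
-A gentler habit (a sketch, reusing names from this section): scope the delete with a
-label selector, and delete anything unlabelled by name.
-
-```bash
-# Only delete the resources carrying the app=httpenv label
-kubectl delete deployments,services -l app=httpenv
-
-# The cron job from earlier doesn't have that label, so delete it by name
-kubectl delete cronjob every3mins
-```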
diff --git a/slides/pks/ingress.md b/slides/pks/ingress.md deleted file mode 100644 index b296ccfd7..000000000 --- a/slides/pks/ingress.md +++ /dev/null @@ -1,247 +0,0 @@ -# Exposing HTTP services with Ingress resources - -- *Services* give us a way to access a pod or a set of pods - -- Services can be exposed to the outside world: - - - with type `NodePort` (on a port >30000) - - - with type `LoadBalancer` (allocating an external load balancer) - -- What about HTTP services? - - - how can we expose `webui`, `rng`, `hasher`? - - - the Kubernetes dashboard? - - - a new version of `webui`? - ---- - -## Exposing HTTP services - -- If we use `NodePort` services, clients have to specify port numbers - - (i.e. http://xxxxx:31234 instead of just http://xxxxx) - -- `LoadBalancer` services are nice, but: - - - they are not available in all environments - - - they often carry an additional cost (e.g. they provision an ELB) - - - they require one extra step for DNS integration -
- (waiting for the `LoadBalancer` to be provisioned; then adding it to DNS) - -- We could build our own reverse proxy - ---- - -## Building a custom reverse proxy - -- There are many options available: - - Apache, HAProxy, Hipache, NGINX, Traefik, ... - - (look at [jpetazzo/aiguillage](https://github.com/jpetazzo/aiguillage) for a minimal reverse proxy configuration using NGINX) - -- Most of these options require us to update/edit configuration files after each change - -- Some of them can pick up virtual hosts and backends from a configuration store - -- Wouldn't it be nice if this configuration could be managed with the Kubernetes API? - --- - -- Enter.red[¹] *Ingress* resources! - -.footnote[.red[¹] Pun maybe intended.] - ---- - -## Ingress resources - -- Kubernetes API resource (`kubectl get ingress`/`ingresses`/`ing`) - -- Designed to expose HTTP services - -- Basic features: - - - load balancing - - SSL termination - - name-based virtual hosting - -- Can also route to different services depending on: - - - URI path (e.g. `/api`→`api-service`, `/static`→`assets-service`) - - Client headers, including cookies (for A/B testing, canary deployment...) - - and more! - ---- - -## Principle of operation - -- Step 1: deploy an *ingress controller* - - - ingress controller = load balancer + control loop - - - the control loop watches over ingress resources, and configures the LB accordingly - -- Step 2: set up DNS - - - associate DNS entries with the load balancer address - -- Step 3: create *ingress resources* - - - the ingress controller picks up these resources and configures the LB - -- Step 4: profit! - ---- - -## Ingress in action - -- We already have an nginx-ingress controller deployed - -- For DNS, we have a wildcard set up pointing at our ingress LB - - - `*.ingress.workshop.paulczar.wtf` - -- We will create ingress resources for various HTTP services - ---- - -## Checking that nginx-ingress runs correctly - -- If Traefik started correctly, we now have a web server listening on each node - -.exercise[ - -- Check that nginx is serving 80/tcp: - ```bash - curl test.ingress.workshop.paulczar.wtf - ``` - -] - -We should get a `404 page not found` error. - -This is normal: we haven't provided any ingress rule yet. - ---- - -## Expose that webui - -- Before we can enable the ingress, we need to create a service for the webui - -.exercise[ - - - create a service for the webui deployment - ```bash - kubectl expose deployment webui --port 80 - ``` - -] - ---- - - -## Setting up host-based routing ingress rules - -- We are going to create an ingress rule for our webui - -.exercise[ - - Write this to `~/workshop/ingress.yaml` and change the host prefix -] - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - name: webui -spec: - rules: - - host: user1.ingress.workshop.paulczar.wtf - http: - paths: - - path: / - backend: - serviceName: webui - servicePort: 80 -``` - ---- - -## Creating our ingress resources - -.exercise[ - - Apply the ingress manifest - ```bash - kubectl apply -f ~/workshop/ingress.yaml - ``` -] - --- - -```bash -$ curl user1.ingress.workshop.paulczar.wtf -Found. Redirecting to /index.html -``` - - ---- - -## Using multiple ingress controllers - -- You can have multiple ingress controllers active simultaneously - - (e.g. Traefik and NGINX) - -- You can even have multiple instances of the same controller - - (e.g. 
one for internal, another for external traffic) - -- The `kubernetes.io/ingress.class` annotation can be used to tell which one to use - -- It's OK if multiple ingress controllers configure the same resource - - (it just means that the service will be accessible through multiple paths) - ---- - -## Ingress: the good - -- The traffic flows directly from the ingress load balancer to the backends - - - it doesn't need to go through the `ClusterIP` - - - in fact, we don't even need a `ClusterIP` (we can use a headless service) - -- The load balancer can be outside of Kubernetes - - (as long as it has access to the cluster subnet) - -- This allows the use of external (hardware, physical machines...) load balancers - -- Annotations can encode special features - - (rate-limiting, A/B testing, session stickiness, etc.) - ---- - -## Ingress: the bad - -- Aforementioned "special features" are not standardized yet - -- Some controllers will support them; some won't - -- Even relatively common features (stripping a path prefix) can differ: - - - [traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip](https://docs.traefik.io/user-guide/kubernetes/#path-based-routing) - - - [ingress.kubernetes.io/rewrite-target: /](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/rewrite) - -- This should eventually stabilize - - (remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`) diff --git a/slides/pks/kubectlexpose.md b/slides/pks/kubectlexpose.md deleted file mode 100644 index 09e176735..000000000 --- a/slides/pks/kubectlexpose.md +++ /dev/null @@ -1,398 +0,0 @@ -# Exposing containers - -- `kubectl expose` creates a *service* for existing pods - -- A *service* is a stable address for a pod (or a bunch of pods) - -- If we want to connect to our pod(s), we need to create a *service* - -- Once a service is created, CoreDNS will allow us to resolve it by name - - (i.e. after creating service `hello`, the name `hello` will resolve to something) - -- There are different types of services, detailed on the following slides: - - `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName` - ---- - -## Basic service types - -- `ClusterIP` (default type) - - - a virtual IP address is allocated for the service (in an internal, private range) - - this IP address is reachable only from within the cluster (nodes and pods) - - our code can connect to the service using the original port number - -- `NodePort` - - - a port is allocated for the service (by default, in the 30000-32768 range) - - that port is made available *on all our nodes* and anybody can connect to it - - our code must be changed to connect to that new port number - -These service types are always available. - -Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules. - ---- - -## More service types - -- `LoadBalancer` - - - an external load balancer is allocated for the service - - the load balancer is configured accordingly -
(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port) - - available only when the underlying infrastructure provides some "load balancer as a service" -
(e.g. AWS, Azure, GCE, OpenStack...) - -- `ExternalName` - - - the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record - - no port, no IP address, no nothing else is allocated - ---- - -class: extra-details - -## If we don't need a clusterIP load balancer - -- Sometimes, we want to access our scaled services directly: - - - if we want to save a tiny little bit of latency (typically less than 1ms) - - - if we need to connect over arbitrary ports (instead of a few fixed ones) - - - if we need to communicate over another protocol than UDP or TCP - - - if we want to decide how to balance the requests client-side - - - ... - -- In that case, we can use a "headless service" - ---- - -class: extra-details - -## Headless services - -- A headless service is obtained by setting the `clusterIP` field to `None` - - (Either with `--cluster-ip=None`, or by providing a custom YAML) - -- As a result, the service doesn't have a virtual IP address - -- Since there is no virtual IP address, there is no load balancer either - -- CoreDNS will return the pods' IP addresses as multiple `A` records - -- This gives us an easy way to discover all the replicas for a deployment - ---- - -## Running containers with open ports - -- Since `ping` doesn't have anything to connect to, we'll have to run something else - -- We could use the `nginx` official image, but ... - - ... we wouldn't be able to tell the backends from each other! - -- We are going to use `jpetazzo/httpenv`, a tiny HTTP server written in Go - -- `jpetazzo/httpenv` listens on port 8888 - -- It serves its environment variables in JSON format - -- The environment variables will include `HOSTNAME`, which will be the pod name - - (and therefore, will be different on each backend) - ---- - -## Creating a deployment for our HTTP server - -- We *could* do `kubectl run httpenv --image=jpetazzo/httpenv` ... - -- But since `kubectl run` is being deprecated, let's see how to use `kubectl create` instead - -.exercise[ - -- In another window, watch the pods (to see when they are created): - ```bash - kubectl get pods -w - ``` - - - -- Create a deployment for this very lightweight HTTP server: - ```bash - kubectl create deployment httpenv --image=jpetazzo/httpenv - ``` - -- Scale it to 10 replicas: - ```bash - kubectl scale deployment httpenv --replicas=10 - ``` - -] - ---- - -## Exposing our deployment - -- We'll create a default `ClusterIP` service - -.exercise[ - -- Expose the HTTP port of our server: - ```bash - kubectl expose deployment httpenv --port 8888 - ``` - -- Look up which IP address was allocated: - ```bash - kubectl get service httpenv - ``` - -] - --- - -The cluster IP is a private IP, you can't access it. - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -httpenv ClusterIP 10.100.200.147 8888/TCP 3m -``` - ---- - -## Services are layer 4 constructs - -- You can assign IP addresses to services, but they are still *layer 4* - - (i.e. a service is not an IP address; it's an IP address + protocol + port) - -- This is caused by the current implementation of `kube-proxy` - - (it relies on mechanisms that don't support layer 3) - -- As a result: you *have to* indicate the port number for your service - -- Running services with arbitrary port (or port ranges) requires hacks - - (e.g. host networking mode) - ---- - -## Testing our service - -- Our service is listening to a private **ClusterIP**. 
- -- If we want to access it we need to expose it as a **NodePort** or a **LoadBalancer** - -- Or you can cheat and forward a port using `kubectl port-forward` - ---- - -## port forwarding - -- Forwards a local port from your machine into a pod - -.exercise[ - -- Forward a port into your deployment: - ```bash - kubectl port-forward service/httpenv 8888:8888 - ``` - -- In a new window run curl a few times: - ```bash - curl localhost:8888 - curl localhost:8888 - curl localhost:8888 - ``` - -- Hit `ctrl-c` in the original window to terminate the port-forward -] - --- - -The response was the same from each request. This is because `kubectl port-forward` forwards to a specific pod, not to the cluster-ip. - ---- - -class: extra-details - -## Services and endpoints - -- A service has a number of "endpoints" - -- Each endpoint is a host + port where the service is available - -- The endpoints are maintained and updated automatically by Kubernetes - -.exercise[ - -- Check the endpoints that Kubernetes has associated with our `httpenv` service: - ```bash - kubectl describe service httpenv - ``` - -] - -In the output, there will be a line starting with `Endpoints:`. - -That line will list a bunch of addresses in `host:port` format. - ---- - -class: extra-details - -## Viewing endpoint details - -- When we have many endpoints, our display commands truncate the list - ```bash - kubectl get endpoints - ``` - -- If we want to see the full list, we can use one of the following commands: - ```bash - kubectl describe endpoints httpenv - kubectl get endpoints httpenv -o yaml - ``` - -- These commands will show us a list of IP addresses - -- These IP addresses should match the addresses of the corresponding pods: - ```bash - kubectl get pods -l app=httpenv -o wide - ``` - ---- - -class: extra-details - -## `endpoints` not `endpoint` - -- `endpoints` is the only resource that cannot be singular - -```bash -$ kubectl get endpoint -error: the server doesn't have a resource type "endpoint" -``` - -- This is because the type itself is plural (unlike every other resource) - -- There is no `endpoint` object: `type Endpoints struct` - -- The type doesn't represent a single endpoint, but a list of endpoints - ---- - -## Exposing services to the outside world - -- The default type (ClusterIP) only works for internal traffic - -- If we want to accept external traffic, we can use one of these: - - - NodePort (expose a service on a TCP port between 30000-32768) - - - LoadBalancer (provision a cloud load balancer for our service) - - - ExternalIP (use one node's external IP address) - - - Ingress (a special mechanism for HTTP services) - -*We'll see NodePorts and Ingresses more in detail later.* - ---- - -## Exposing services to the outside world - -.exercise[ - -- Take a copy of the httpenv service: - ```bash - kubectl get svc httpenv -o yaml > /tmp/httpenv.yaml - ``` - -- Edit `/tmp/httpenv.yaml` and set the service to be of type `Loadbalancer`, and update the ports: - -```yaml -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 8888 - type: LoadBalancer - -``` - -] - --- - -this is what a kubernetes manifest looks like! 
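-
-One caveat with exporting a live object (a sketch): the YAML includes cluster-managed,
-read-only fields. Stripping the usual single-line ones keeps the manifest cleaner to edit:
-
-```bash
-# Dump the service, dropping a few fields the cluster manages for us
-kubectl get svc httpenv -o yaml \
-  | grep -v -E "clusterIP:|resourceVersion:|selfLink:|uid:|creationTimestamp:" \
-  > /tmp/httpenv.yaml
-```
-
-(Leaving them in is mostly harmless; it just makes the file noisier than it needs to be.)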
- ---- - -## Service Manifest - -.exercise[ - -```yaml -apiVersion: v1 -kind: Service -metadata: - labels: - app: httpenv - name: httpenv -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 8888 - name: http - selector: - app: httpenv - type: LoadBalancer -``` - -] - ---- - -## kubectl apply - -.exercise[ - - - Apply your changes: - ```bash - kubectl delete svc httpenv - kubectl apply -f /tmp/httpenv.yaml - ``` - -] - --- - -Why did we delete the svc? Running a `kubectl apply` on a imperatively created resource can cause problems. - ---- - -## yay loadbalancing - -.exercise[ -- Check for the IP of the loadbalancer: - ```bash - kubectl get svc httpenv - ``` - -- Test access via the loadbalancer: - ```bash - curl :8888 - ``` -] diff --git a/slides/pks/kubectlget.md b/slides/pks/kubectlget.md deleted file mode 100644 index ff283359f..000000000 --- a/slides/pks/kubectlget.md +++ /dev/null @@ -1,359 +0,0 @@ -# First contact with `kubectl` - -- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes - -- It is a rich CLI tool around the Kubernetes API - - (Everything you can do with `kubectl`, you can do directly with the API) - -- On our machines, there is a `~/.kube/config` file with: - - - the Kubernetes API address - - - the path to our TLS certificates used to authenticate - -- You can also use the `--kubeconfig` flag to pass a config file - -- Or directly `--server`, `--user`, etc. - -- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"... - ---- - -## `kubectl get` - -- Let's look at our `Node` resources with `kubectl get`! - -.exercise[ - -- Look at the composition of our cluster: - ```bash - kubectl get node - ``` - -- These commands are equivalent: - ```bash - kubectl get no - kubectl get node - kubectl get nodes - ``` - -] - ---- - -## Obtaining machine-readable output - -- `kubectl get` can output JSON, YAML, or be directly formatted - -.exercise[ - -- Give us more info about the nodes: - ```bash - kubectl get nodes -o wide - ``` - -- Let's have some YAML: - ```bash - kubectl get no -o yaml - ``` - See that `kind: List` at the end? It's the type of our result! - -] - ---- - -## (Ab)using `kubectl` and `jq` - -- It's super easy to build custom reports - -.exercise[ - -- Show the capacity of all our nodes as a stream of JSON objects: - ```bash - kubectl get nodes -o json | - jq ".items[] | {name:.metadata.name} + .status.capacity" - ``` - -] - ---- - -class: extra-details - -## Exploring types and definitions - -- We can list all available resource types by running `kubectl api-resources` -
- (In Kubernetes 1.10 and prior, this command used to be `kubectl get`) - -- We can view the definition for a resource type with: - ```bash - kubectl explain type - ``` - -- We can view the definition of a field in a resource, for instance: - ```bash - kubectl explain node.spec - ``` - -- Or get the full definition of all fields and sub-fields: - ```bash - kubectl explain node --recursive - ``` - ---- - -class: extra-details - -## Introspection vs. documentation - -- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference) - -- The API documentation is usually easier to read, but: - - - it won't show custom types (like Custom Resource Definitions) - - - we need to make sure that we look at the correct version - -- `kubectl api-resources` and `kubectl explain` perform *introspection* - - (they communicate with the API server and obtain the exact type definitions) - ---- - -## Type names - -- The most common resource names have three forms: - - - singular (e.g. `node`, `service`, `deployment`) - - - plural (e.g. `nodes`, `services`, `deployments`) - - - short (e.g. `no`, `svc`, `deploy`) - -- Some resources do not have a short name - -- `Endpoints` only have a plural form - - (because even a single `Endpoints` resource is actually a list of endpoints) - ---- - -## Viewing details - -- We can use `kubectl get -o yaml` to see all available details - -- However, YAML output is often simultaneously too much and not enough - -- For instance, `kubectl get node node1 -o yaml` is: - - - too much information (e.g.: list of images available on this node) - - - not enough information (e.g.: doesn't show pods running on this node) - - - difficult to read for a human operator - -- For a comprehensive overview, we can use `kubectl describe` instead - ---- - -## `kubectl describe` - -- `kubectl describe` needs a resource type and (optionally) a resource name - -- It is possible to provide a resource name *prefix* - - (all matching objects will be displayed) - -- `kubectl describe` will retrieve some extra information about the resource - -.exercise[ - -- Look at the information available for `node1` with one of the following commands: - ```bash - kubectl describe \`k get node -o name | head -1` - ``` - -] - -(We should notice a bunch of control plane pods.) - ---- - -## Services - -- A *service* is a stable endpoint to connect to "something" - - (In the initial proposal, they were called "portals") - -.exercise[ - -- List the services on our cluster with one of these commands: - ```bash - kubectl get services - kubectl get svc - ``` - -] - --- - -There should be no services. This is because you're not running anything yet. But there are some services running in other namespaces. - ---- - -## Services - -- A *service* is a stable endpoint to connect to "something" - - (In the initial proposal, they were called "portals") - -.exercise[ - -- List the services on our cluster with one of these commands: - ```bash - kubectl get services --all-namespaces - kubectl get svc -A - ``` - -] - --- - -There's a bunch of services already running that are used in the operations of the Kubernetes cluster. - ---- - -## ClusterIP services - -- A `ClusterIP` service is internal, available from the cluster only - -- This is useful for introspection from within containers - -*The Cluster IP is only accessible from inside the cluster. 
We'll explore other ways to expose a service later.* - ---- - -## Listing running containers - -- Containers are manipulated through *pods* - -- A pod is a group of containers: - - - running together (on the same node) - - - sharing resources (RAM, CPU; but also network, volumes) - -.exercise[ - -- List pods on our cluster: - ```bash - kubectl get pods - ``` - -] - --- - -*Where are the pods that we saw just a moment earlier?!?* - ---- - -## Namespaces - -- Namespaces allow us to segregate resources - -.exercise[ - -- List the namespaces on our cluster with one of these commands: - ```bash - kubectl get namespaces - kubectl get namespace - kubectl get ns - ``` - -] - --- - -*You know what ... This `kube-system` thing looks interesting.* - -*In fact, I'm pretty sure it showed up earlier, when we did:* - -`kubectl describe node` - ---- - -## Accessing namespaces - -- By default, `kubectl` uses the `default` namespace - -- We can see resources in all namespaces with `--all-namespaces` - -.exercise[ - -- List the pods in all namespaces: - ```bash - kubectl get pods --all-namespaces - ``` - -- Since Kubernetes 1.14, we can also use `-A` as a shorter version: - ```bash - kubectl get pods -A - ``` - -] - -*Here are our system pods!* - ---- - -## What are all these control plane pods? - -- `kube-apiserver` is the API server - -- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/)) - - -- the `READY` column indicates the number of containers in each pod - - (1 for most pods, but `coredns` has 3, for instance) - ---- - -## Scoping another namespace - -- We can also look at a different namespace (other than `default`) - -.exercise[ - -- List only the pods in the `kube-system` namespace: - ```bash - kubectl get pods --namespace=kube-system - kubectl get pods -n kube-system - ``` - -] - ---- - -## Namespaces and other `kubectl` commands - -- We can use `-n`/`--namespace` with almost every `kubectl` command - -- Example: - - - `kubectl create --namespace=X` to create something in namespace X - -- We can use `-A`/`--all-namespaces` with most commands that manipulate multiple objects - -- Examples: - - - `kubectl delete` can delete resources across multiple namespaces - - - `kubectl label` can add/remove/update labels across multiple namespaces - --- - -**These commands will not work for you, as you are restricted by Role Based Authentication to only have write access inside your own namespace.** \ No newline at end of file diff --git a/slides/pks/kubectlrun.md b/slides/pks/kubectlrun.md deleted file mode 100644 index 5a437a9ce..000000000 --- a/slides/pks/kubectlrun.md +++ /dev/null @@ -1,614 +0,0 @@ -# Running our first containers on Kubernetes - -- First things first: we cannot run a container - --- - -- We are going to run a pod, and in that pod there will be a single container - --- - -- In that container in the pod, we are going to run a simple `ping` command - -- Then we are going to start additional copies of the pod - ---- - -## Starting a simple pod with `kubectl run` - -- We need to specify at least a *name* and the image we want to use - -.exercise[ - -- Let's ping the address of `localhost`, the loopback interface: - ```bash - kubectl run pingpong --image alpine ping 127.0.0.1 - ``` - - - -] - --- - -(Starting with Kubernetes 1.12, we get a message telling us that -`kubectl run` is deprecated. Let's ignore it for now.) 
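-
-If you want to see the pod come up before digging further, you can watch it by label
-(`kubectl run` adds a `run=pingpong` label to the objects it creates):
-
-```bash
-# Watch the pingpong pod reach the Running state (Ctrl-C to stop watching)
-kubectl get pods -l run=pingpong -w
-```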
- ---- - -## Behind the scenes of `kubectl run` - -- Let's look at the resources that were created by `kubectl run` - -.exercise[ - -- List most resource types: - ```bash - kubectl get all - ``` - -] - --- - -We should see the following things: -- `deployment.apps/pingpong` (the *deployment* that we just created) -- `replicaset.apps/pingpong-xxxxxxxxxx` (a *replica set* created by the deployment) -- `pod/pingpong-xxxxxxxxxx-yyyyy` (a *pod* created by the replica set) - -Note: as of 1.10.1, resource types are displayed in more detail. - ---- - -## What are these different things? - -- A *deployment* is a high-level construct - - - allows scaling, rolling updates, rollbacks - - - multiple deployments can be used together to implement a - [canary deployment](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments) - - - delegates pods management to *replica sets* - -- A *replica set* is a low-level construct - - - makes sure that a given number of identical pods are running - - - allows scaling - - - rarely used directly - -- A *replication controller* is the (deprecated) predecessor of a replica set - ---- - -## Our `pingpong` deployment - -- `kubectl run` created a *deployment*, `deployment.apps/pingpong` - -``` -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -deployment.apps/pingpong 1 1 1 1 10m -``` - -- That deployment created a *replica set*, `replicaset.apps/pingpong-xxxxxxxxxx` - -``` -NAME DESIRED CURRENT READY AGE -replicaset.apps/pingpong-7c8bbcd9bc 1 1 1 10m -``` - -- That replica set created a *pod*, `pod/pingpong-xxxxxxxxxx-yyyyy` - -``` -NAME READY STATUS RESTARTS AGE -pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m -``` - -- We'll see later how these folks play together for: - - - scaling, high availability, rolling updates - ---- - -## Viewing container output - -- Let's use the `kubectl logs` command - -- We will pass either a *pod name*, or a *type/name* - - (E.g. if we specify a deployment or replica set, it will get the first pod in it) - -- Unless specified otherwise, it will only show logs of the first container in the pod - - (Good thing there's only one in ours!) - -.exercise[ - -- View the result of our `ping` command: - ```bash - kubectl logs deploy/pingpong - ``` - -] - ---- - -## Streaming logs in real time - -- Just like `docker logs`, `kubectl logs` supports convenient options: - - - `-f`/`--follow` to stream logs in real time (à la `tail -f`) - - - `--tail` to indicate how many lines you want to see (from the end) - - - `--since` to get logs only after a given timestamp - -.exercise[ - -- View the latest logs of our `ping` command: - ```bash - kubectl logs deploy/pingpong --tail 1 --follow - ``` - -- Leave that command running, so that we can keep an eye on these logs - - - -] - ---- - -## Scaling our application - -- We can create additional copies of our container (I mean, our pod) with `kubectl scale` - -.exercise[ - -- Scale our `pingpong` deployment: - ```bash - kubectl scale deploy/pingpong --replicas 3 - ``` - -- Note that this command does exactly the same thing: - ```bash - kubectl scale deployment pingpong --replicas 3 - ``` - -] - -Note: what if we tried to scale `replicaset.apps/pingpong-xxxxxxxxxx`? - -We could! But the *deployment* would notice it right away, and scale back to the initial level. 
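-
-If you're curious, you can watch that self-healing behaviour (a sketch; the replica set
-name is generated, so we look it up instead of typing it):
-
-```bash
-# Scale the replica set behind the deployment's back...
-RS=$(kubectl get replicaset -l run=pingpong -o name | head -1)
-kubectl scale $RS --replicas 1
-
-# ...and watch the deployment bring the pod count straight back up
-kubectl get pods -l run=pingpong -w
-```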
- ---- - -## Log streaming - -- Let's look again at the output of `kubectl logs` - - (the one we started before scaling up) - -- `kubectl logs` shows us one line per second - -- We could expect 3 lines per second - - (since we should now have 3 pods running `ping`) - -- Let's try to figure out what's happening! - ---- - -## Streaming logs of multiple pods - -- What happens if we restart `kubectl logs`? - -.exercise[ - -- Interrupt `kubectl logs` (with Ctrl-C) - - - -- Restart it: - ```bash - kubectl logs deploy/pingpong --tail 1 --follow - ``` - - - -] - -`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them. - -Let's leave `kubectl logs` running while we keep exploring. - ---- - - -## Resilience - -- The *deployment* `pingpong` watches its *replica set* - -- The *replica set* ensures that the right number of *pods* are running - -- What happens if pods disappear? - -.exercise[ - -- In a separate window, watch the list of pods: - ```bash - watch kubectl get pods - ``` - - - -- Destroy the pod currently shown by `kubectl logs`: - ``` - kubectl delete pod pingpong-xxxxxxxxxx-yyyyy - ``` - - - -] - ---- - -## What happened? - -- `kubectl delete pod` terminates the pod gracefully - - (sending it the TERM signal and waiting for it to shutdown) - -- As soon as the pod is in "Terminating" state, the Replica Set replaces it - -- But we can still see the output of the "Terminating" pod in `kubectl logs` - -- Until 30 seconds later, when the grace period expires - -- The pod is then killed, and `kubectl logs` exits - ---- - - -## What if we wanted something different? - -- What if we wanted to start a "one-shot" container that *doesn't* get restarted? - -- We could use `kubectl run --restart=OnFailure` or `kubectl run --restart=Never` - -- These commands would create *jobs* or *pods* instead of *deployments* - -- Under the hood, `kubectl run` invokes "generators" to create resource descriptions - -- We could also write these resource descriptions ourselves (typically in YAML), -
and create them on the cluster with `kubectl apply -f` (discussed later) - -- With `kubectl run --schedule=...`, we can also create *cronjobs* - ---- - -## Scheduling periodic background work - -- A Cron Job is a job that will be executed at specific intervals - - (the name comes from the traditional cronjobs executed by the UNIX crond) - -- It requires a *schedule*, represented as five space-separated fields: - - - minute [0,59] - - hour [0,23] - - day of the month [1,31] - - month of the year [1,12] - - day of the week ([0,6] with 0=Sunday) - -- `*` means "all valid values"; `/N` means "every N" - -- Example: `*/3 * * * *` means "every three minutes" - ---- - -## Creating a Cron Job - -- Let's create a simple job to be executed every three minutes - -- Cron Jobs need to terminate, otherwise they'd run forever - -.exercise[ - -- Create the Cron Job: - ```bash - kubectl create cronjob every3mins --image alpine \ - --schedule='*/3 * * * *' --restart OnFailure \ - -- ping -c 3 1.1.1.1 - ``` - -- Check the resource that was created: - ```bash - kubectl get cronjobs - ``` - -] - ---- - -## Cron Jobs in action - -- At the specified schedule, the Cron Job will create a Job - -- The Job will create a Pod - -- The Job will make sure that the Pod completes - - (re-creating another one if it fails, for instance if its node fails) - -.exercise[ - -- Check the Jobs that are created: - ```bash - kubectl get jobs - ``` - -] - -(It will take a few minutes before the first job is scheduled.) - ---- - - -## What about that deprecation warning? - -- As we can see from the previous slide, `kubectl run` can do many things - -- The exact type of resource created is not obvious - -- To make things more explicit, it is better to use `kubectl create`: - - - `kubectl create deployment` to create a deployment - - - `kubectl create job` to create a job - - - `kubectl create cronjob` to run a job periodically -
(since Kubernetes 1.14) - -- Eventually, `kubectl run` will be used only to start one-shot pods - - (see https://github.com/kubernetes/kubernetes/pull/68132) - ---- - -## Various ways of creating resources - -- `kubectl run` - - - easy way to get started - - versatile - -- `kubectl create ` - - - explicit, but lacks some features - - can't create a CronJob before Kubernetes 1.14 - - can't pass command-line arguments to deployments - -- `kubectl create -f foo.yaml` or `kubectl apply -f foo.yaml` - - - all features are available - - requires writing YAML - ---- - -## kubectl create pingpong - -How could we replace the `kubectl run` for the original pingpong deployment ? - -- `kubectl create deployment` doesn't let you specify command/args for the container. - -- We could run `kubectl create deployment pingpong --image alpine --dry-run -o yaml > /tmp/pingpong.yaml` and then modify the manifest. - -- We could use `kubectl patch`: - -```bash -kubectl create deployment pingpong2 --image alpine -kubectl patch deployment pingpong2 -p ' -{"spec":{"template": {"spec": {"containers": -[{"name":"alpine","image":"alpine","command": -["ping","1.1.1.1"]}]}}}}' - -``` - --- - -Yay JSON on the commandline - ---- - -## Viewing logs of multiple pods - -- When we specify a deployment name, only one single pod's logs are shown - -- We can view the logs of multiple pods by specifying a *selector* - -- A selector is a logic expression using *labels* - -- Conveniently, when you `kubectl run somename`, the associated objects have a `run=somename` label - -.exercise[ - -- View the last line of log from all pods with the `run=pingpong` label: - ```bash - kubectl logs -l run=pingpong --tail 1 - ``` - -] - ---- - -### Streaming logs of multiple pods - -- Can we stream the logs of all our `pingpong` pods? - -.exercise[ - -- Combine `-l` and `-f` flags: - ```bash - kubectl logs -l run=pingpong --tail 1 -f - ``` - - - -] - -*Note: combining `-l` and `-f` is only possible since Kubernetes 1.14!* - -*Let's try to understand why ...* - ---- - -class: extra-details - -### Streaming logs of many pods - -- Let's see what happens if we try to stream the logs for more than 5 pods - -.exercise[ - -- Scale up our deployment: - ```bash - kubectl scale deployment pingpong --replicas=8 - ``` - -- Stream the logs: - ```bash - kubectl logs -l run=pingpong --tail 1 -f - ``` - - - -] - -We see a message like the following one: -``` -error: you are attempting to follow 8 log streams, -but maximum allowed concurency is 5, -use --max-log-requests to increase the limit -``` - ---- - -class: extra-details - -## Why can't we stream the logs of many pods? 
- -- `kubectl` opens one connection to the API server per pod - -- For each pod, the API server opens one extra connection to the corresponding kubelet - -- If there are 1000 pods in our deployment, that's 1000 inbound + 1000 outbound connections on the API server - -- This could easily put a lot of stress on the API server - -- Prior Kubernetes 1.14, it was decided to *not* allow multiple connections - -- From Kubernetes 1.14, it is allowed, but limited to 5 connections - - (this can be changed with `--max-log-requests`) - -- For more details about the rationale, see - [PR #67573](https://github.com/kubernetes/kubernetes/pull/67573) - ---- - -## Shortcomings of `kubectl logs` - -- We don't see which pod sent which log line - -- If pods are restarted / replaced, the log stream stops - -- If new pods are added, we don't see their logs - -- To stream the logs of multiple pods, we need to write a selector - -- There are external tools to address these shortcomings - - (e.g.: [Stern](https://github.com/wercker/stern)) - ---- - -class: extra-details - -## `kubectl logs -l ... --tail N` - -- If we run this with Kubernetes 1.12, the last command shows multiple lines - -- This is a regression when `--tail` is used together with `-l`/`--selector` - -- It always shows the last 10 lines of output for each container - - (instead of the number of lines specified on the command line) - -- The problem was fixed in Kubernetes 1.13 - -*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.* - ---- - -class: extra-details - -## Party tricks involving IP addresses - -- It is possible to specify an IP address with less than 4 bytes - - (example: `127.1`) - -- Zeroes are then inserted in the middle - -- As a result, `127.1` expands to `127.0.0.1` - -- So we can `ping 127.1` to ping `localhost`! - -(See [this blog post](https://ma.ttias.be/theres-more-than-one-way-to-write-an-ip-address/ -) for more details.) - ---- - -class: extra-details - -## More party tricks with IP addresses - -- We can also ping `1.1` - -- `1.1` will expand to `1.0.0.1` - -- This is one of the addresses of Cloudflare's - [public DNS resolver](https://blog.cloudflare.com/announcing-1111/) - -- This is a quick way to check connectivity - - (if we can reach 1.1, we probably have internet access) diff --git a/slides/pks/kubercoins.md b/slides/pks/kubercoins.md deleted file mode 100644 index 3220f5fa4..000000000 --- a/slides/pks/kubercoins.md +++ /dev/null @@ -1,244 +0,0 @@ -# Deploying a sample application - -- We will connect to our new Kubernetes cluster - -- We will deploy a sample application, "DockerCoins" - -- That app features multiple micro-services and a web UI - ---- - -## Connecting to our Kubernetes cluster - -- Our cluster has multiple nodes named `node1`, `node2`, etc. - -- We will do everything from `node1` - -- We have SSH access to the other nodes, but won't need it - - (but we can use it for debugging, troubleshooting, etc.) - -.exercise[ - -- Log into `node1` - -- Check that all nodes are `Ready`: - ```bash - kubectl get nodes - ``` - -] - ---- - -## Cloning some repos - -- We will need two repositories: - - - the first one has the "DockerCoins" demo app - - - the second one has these slides, some scripts, more manifests ... 
- -.exercise[ - -- Clone the kubercoins repository on `node1`: - ```bash - git clone https://github.com/jpetazzo/kubercoins - ``` - - -- Clone the container.training repository as well: - ```bash - git clone https://@@GITREPO@@ - ``` - -] - ---- - -## Running the application - -Without further ado, let's start this application! - -.exercise[ - -- Apply all the manifests from the kubercoins repository: - ```bash - kubectl apply -f kubercoins/ - ``` - -] - ---- - -## What's this application? - --- - -- It is a DockerCoin miner! .emoji[💰🐳📦🚢] - --- - -- No, you can't buy coffee with DockerCoins - --- - -- How DockerCoins works: - - - generate a few random bytes - - - hash these bytes - - - increment a counter (to keep track of speed) - - - repeat forever! - --- - -- DockerCoins is *not* a cryptocurrency - - (the only common points are "randomness", "hashing", and "coins" in the name) - ---- - -## DockerCoins in the microservices era - -- DockerCoins is made of 5 services: - - - `rng` = web service generating random bytes - - - `hasher` = web service computing hash of POSTed data - - - `worker` = background process calling `rng` and `hasher` - - - `webui` = web interface to watch progress - - - `redis` = data store (holds a counter updated by `worker`) - -- These 5 services are visible in the application's Compose file, - [docker-compose.yml]( - https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) - ---- - -## How DockerCoins works - -- `worker` invokes web service `rng` to generate random bytes - -- `worker` invokes web service `hasher` to hash these bytes - -- `worker` does this in an infinite loop - -- every second, `worker` updates `redis` to indicate how many loops were done - -- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser - -*(See diagram on next slide!)* - ---- - -class: pic - -![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) - ---- - -## Service discovery in container-land - -How does each service find out the address of the other ones? - --- - -- We do not hard-code IP addresses in the code - -- We do not hard-code FQDNs in the code, either - -- We just connect to a service name, and container-magic does the rest - - (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") - ---- - -## Example in `worker/worker.py` - -```python -redis = Redis("`redis`") - - -def get_random_bytes(): - r = requests.get("http://`rng`/32") - return r.content - - -def hash_bytes(data): - r = requests.post("http://`hasher`/", - data=data, - headers={"Content-Type": "application/octet-stream"}) -``` - -(Full source code available [here]( -https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 -)) - ---- - -## Show me the code! - -- You can check the GitHub repository with all the materials of this workshop: -
https://@@GITREPO@@ - -- The application is in the [dockercoins]( - https://@@GITREPO@@/tree/master/dockercoins) - subdirectory - -- The Compose file ([docker-compose.yml]( - https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) - lists all 5 services - -- `redis` is using an official image from the Docker Hub - -- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile - -- Each service's Dockerfile and source code is in its own directory - - (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, - `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) - directory, etc.) - ---- - -## Our application at work - -- We can check the logs of our application's pods - -.exercise[ - -- Check the logs of the various components: - ```bash - kubectl logs deploy/worker - kubectl logs deploy/hasher - ``` - -] - ---- - -## Connecting to the web UI - -- "Logs are exciting and fun!" (No-one, ever) - -- The `webui` container exposes a web dashboard; let's view it - -.exercise[ - -- Check the NodePort allocated to the web UI: - ```bash - kubectl get svc webui - ``` - -- Open that in a web browser - -] - -A drawing area should show up, and after a few seconds, a blue -graph will appear. diff --git a/slides/pks/logistics.md b/slides/pks/logistics.md deleted file mode 100644 index 5aa248637..000000000 --- a/slides/pks/logistics.md +++ /dev/null @@ -1,11 +0,0 @@ -## Intros - -- Hello! We are: - - - .emoji[👨🏾‍🎓] Paul Czarkowski ([@pczarkowski](https://twitter.com/pczarkowski), VMware) - - .emoji[👨🏾‍🎓] Tyler Britten ([@tybritten](https://twitter.com/tybritten), VMWare) - - -- Feel free to interrupt for questions at any time - -- *Especially when you see full screen container pictures!* diff --git a/slides/pks/logs-centralized.md b/slides/pks/logs-centralized.md deleted file mode 100644 index 07af0ce3a..000000000 --- a/slides/pks/logs-centralized.md +++ /dev/null @@ -1,147 +0,0 @@ -# Centralized logging - -- Using `kubectl` or `stern` is simple; but it has drawbacks: - - - when a node goes down, its logs are not available anymore - - - we can only dump or stream logs; we want to search/index/count... - -- We want to send all our logs to a single place - -- We want to parse them (e.g. for HTTP logs) and index them - -- We want a nice web dashboard - --- - -- We are going to deploy an EFK stack - ---- - -## What is EFK? - -- EFK is three components: - - - ElasticSearch (to store and index log entries) - - - Fluentd (to get container logs, process them, and put them in ElasticSearch) - - - Kibana (to view/search log entries with a nice UI) - -- The only component that we need to access from outside the cluster will be Kibana - ---- - -## Deploying EFK on our cluster - -- We are going to use a YAML file describing all the required resources - -.exercise[ - -- Load the YAML file into our cluster: - ```bash - kubectl apply -f ~/container.training/k8s/efk.yaml - ``` - -] - -If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that -it creates a daemon set, two deployments, two services, -and a few roles and role bindings (to give fluentd the required permissions). 
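-
-To check that everything is coming up, a broad listing is enough (the exact resource
-names depend on the manifest, so we don't guess them here):
-
-```bash
-# We should see the daemon set, two deployments, two services,
-# and all their pods reaching the Running state after a minute or two
-kubectl get daemonsets,deployments,services,pods
-```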
- ---- - -## The itinerary of a log line (before Fluentd) - -- A container writes a line on stdout or stderr - -- Both are typically piped to the container engine (Docker or otherwise) - -- The container engine reads the line, and sends it to a logging driver - -- The timestamp and stream (stdout or stderr) is added to the log line - -- With the default configuration for Kubernetes, the line is written to a JSON file - - (`/var/log/containers/pod-name_namespace_container-id.log`) - -- That file is read when we invoke `kubectl logs`; we can access it directly too - ---- - -## The itinerary of a log line (with Fluentd) - -- Fluentd runs on each node (thanks to a daemon set) - -- It bind-mounts `/var/log/containers` from the host (to access these files) - -- It continuously scans this directory for new files; reads them; parses them - -- Each log line becomes a JSON object, fully annotated with extra information: -
container id, pod name, Kubernetes labels... - -- These JSON objects are stored in ElasticSearch - -- ElasticSearch indexes the JSON objects - -- We can access the logs through Kibana (and perform searches, counts, etc.) - ---- - -## Accessing Kibana - -- Kibana offers a web interface that is relatively straightforward - -- Let's check it out! - -.exercise[ - -- Check which `NodePort` was allocated to Kibana: - ```bash - kubectl get svc kibana - ``` - -- With our web browser, connect to Kibana - -] - ---- - -## Using Kibana - -*Note: this is not a Kibana workshop! So this section is deliberately very terse.* - -- The first time you connect to Kibana, you must "configure an index pattern" - -- Just use the one that is suggested, `@timestamp`.red[*] - -- Then click "Discover" (in the top-left corner) - -- You should see container logs - -- Advice: in the left column, select a few fields to display, e.g.: - - `kubernetes.host`, `kubernetes.pod_name`, `stream`, `log` - -.red[*]If you don't see `@timestamp`, it's probably because no logs exist yet. -
Wait a bit, and double-check the logging pipeline! - ---- - -## Caveat emptor - -We are using EFK because it is relatively straightforward -to deploy on Kubernetes, without having to redeploy or reconfigure -our cluster. But it doesn't mean that it will always be the best -option for your use-case. If you are running Kubernetes in the -cloud, you might consider using the cloud provider's logging -infrastructure (if it can be integrated with Kubernetes). - -The deployment method that we will use here has been simplified: -there is only one ElasticSearch node. In a real deployment, you -might use a cluster, both for performance and reliability reasons. -But this is outside of the scope of this chapter. - -The YAML file that we used creates all the resources in the -`default` namespace, for simplicity. In a real scenario, you will -create the resources in the `kube-system` namespace or in a dedicated namespace. diff --git a/slides/pks/octant.md b/slides/pks/octant.md deleted file mode 100644 index 0f7998051..000000000 --- a/slides/pks/octant.md +++ /dev/null @@ -1,17 +0,0 @@ -# Octant - -Octant is an open source tool from VMWare which is designed to be a Kubernetes workload visualization tool that runs locally and uses your Kubeconfig to connect to the Kubernetes cluster. - -Since Octant runs locally on your machine and only uses your kube credentials its [in theory at least] more secure than the kubernetes dashboard. - -.exercise[ - -- Run octant and browse through your resources: - ```bash - octant - ``` -] - --- - -*We can use Octant through the workshop to see our resources running in Kubernetes. If you don't have it already installed, you can ignore it.* diff --git a/slides/pks/ourapponkube.md b/slides/pks/ourapponkube.md deleted file mode 100644 index 8cf858b5b..000000000 --- a/slides/pks/ourapponkube.md +++ /dev/null @@ -1,139 +0,0 @@ -# Running our application on Kubernetes - -- We can now deploy our code (as well as a redis instance) - -.exercise[ - -- Deploy `redis`: - ```bash - kubectl create deployment redis --image=redis - ``` - -- Deploy everything else: - ```bash - for SERVICE in hasher rng webui worker; do - kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG - done - ``` - -] - ---- - -## Is this working? - -- After waiting for the deployment to complete, let's look at the logs! - - (Hint: use `kubectl get deploy -w` to watch deployment events) - -.exercise[ - - - -- Look at some logs: - ```bash - kubectl logs deploy/rng - kubectl logs deploy/worker - ``` - -] - --- - -🤔 `rng` is fine ... But not `worker`. - --- - -💡 Oh right! We forgot to `expose`. - ---- - -## Connecting containers together - -- Three deployments need to be reachable by others: `hasher`, `redis`, `rng` - -- `worker` doesn't need to be exposed - -- `webui` will be dealt with later - -.exercise[ - -- Expose each deployment, specifying the right port: - ```bash - kubectl expose deployment redis --port 6379 - kubectl expose deployment rng --port 80 - kubectl expose deployment hasher --port 80 - ``` - -] - ---- - -## Is this working yet? - -- The `worker` has an infinite loop, that retries 10 seconds after an error - -.exercise[ - -- Stream the worker's logs: - ```bash - kubectl logs deploy/worker --follow - ``` - - (Give it about 10 seconds to recover) - - - -] - --- - -We should now see the `worker`, well, working happily. 
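-
-A couple of optional sanity checks before moving on (nothing new here, this only
-confirms the wiring we just set up):
-
-```bash
-# The three exposed services should each have a ClusterIP
-kubectl get services redis rng hasher
-
-# The worker log should show fresh activity instead of the earlier retry loop
-kubectl logs deploy/worker --tail 5
-```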
- ---- - -## Exposing services for external access - -- Now we would like to access the Web UI - -- We will use `kubectl port-forward` because we don't want the whole world to see it. - -.exercise[ - -- Create a port forward for the Web UI: - ```bash - kubectl port-forward deploy/webui 8888:80 - ``` -- In a new terminal check you can access it: - ```bash - curl localhost:8888 - ``` -] - --- - -The output `Found. Redirecting to /index.html` tells us the port forward worked. - ---- - -## Accessing the web UI - -- We can now access the web UI from the port-forward. But nobody else can. - -.exercise[ - -- Open the web UI in your browser (http://localhost:8888/) - - - -] - --- - -*Alright, we're back to where we started, when we were running on a single node!* diff --git a/slides/pks/prereqs.md b/slides/pks/prereqs.md deleted file mode 100644 index 8d8a7aec7..000000000 --- a/slides/pks/prereqs.md +++ /dev/null @@ -1,114 +0,0 @@ -# Pre-requirements - -- Be comfortable with the UNIX command line - - - navigating directories - - - editing files - - - a little bit of bash-fu (environment variables, loops) - -- Some Docker knowledge - - - `docker run`, `docker ps`, `docker build` - - - ideally, you know how to write a Dockerfile and build it -
- (even if it's a `FROM` line and a couple of `RUN` commands) - -- It's totally OK if you are not a Docker expert! - ---- - -## software pre-requirements - -- You'll need the following software installed on your local laptop: - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -* [helm 3](https://helm.sh/docs/using_helm/#installing-helm) - -- Bonus tools - -* [octant](https://github.com/vmware/octant#installation) -* [stern](https://github.com/wercker/stern/releases/tag/1.11.0) -* [jq](https://stedolan.github.io/jq/download/) - ---- - -class: title - -*Tell me and I forget.* -
-*Teach me and I remember.* -
-*Involve me and I learn.* - -Misattributed to Benjamin Franklin - -[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/) - ---- - -## Hands-on sections - -- The whole workshop is hands-on - -- You are invited to reproduce all the demos - -- You will be using conference wifi and a shared kubernetes cluster. Please be kind to both. - -- All hands-on sections are clearly identified, like the gray rectangle below - -.exercise[ - -- This is the stuff you're supposed to do! - -- Go to @@SLIDES@@ to view these slides - -- Join the chat room: @@CHAT@@ - - - -] - ---- - -class: in-person - -## Where are we going to run our containers? - ---- - -class: in-person - -## shared cluster dedicated to this workshop - -- A large Pivotal Container Service (PKS) cluster deployed to Google Cloud. - -- It will remain up for the duration of the workshop (and maybe a few days beyond) - -- You should have a credentials to log into the cluster. - - ---- - -class: in-person - -## Why don't we run containers locally? - -- Installing this stuff can be hard on some machines - - (32 bits CPU or OS... Laptops without administrator access... etc.) - -- *"The whole team downloaded all these container images from the WiFi! -
... and it went great!"* (Literally no-one ever) - -- All you need is a computer (or even a phone or tablet!), with: - - - an internet connection - - - a web browser - - - kubectl - - - helm diff --git a/slides/pks/sampleapp.md b/slides/pks/sampleapp.md deleted file mode 100644 index 4da4643ec..000000000 --- a/slides/pks/sampleapp.md +++ /dev/null @@ -1,145 +0,0 @@ -# Our sample application - -- DockerCoins - -![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg) - - ---- - -## What is DockerCoins? - --- - -- It is a DockerCoin miner! .emoji[💰🐳📦🚢] - --- - -- No, you can't buy coffee with DockerCoins - --- - -- How DockerCoins works: - - - generate a few random bytes - - hash these bytes - - increment a counter (to keep track of speed) - - repeat forever! - --- - -- DockerCoins is *not* a cryptocurrency - - (the only common points are "randomness," "hashing," and "coins" in the name) - ---- - -## DockerCoins in the microservices era - -- DockerCoins is made of 5 services: - - - `rng` = web service generating random bytes - - `hasher` = web service computing hash of POSTed data - - `worker` = background process calling `rng` and `hasher` - - `webui` = web interface to watch progress - - `redis` = data store (holds a counter updated by `worker`) - -- These 5 services are visible in the application's Compose file, - [docker-compose.yml]( - https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) - ---- - -## How DockerCoins works - -- `worker` invokes web service `rng` to generate random bytes - -- `worker` invokes web service `hasher` to hash these bytes - -- `worker` does this in an infinite loop - -- every second, `worker` updates `redis` to indicate how many loops were done - -- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser - -## Service discovery in container-land - -How does each service find out the address of the other ones? - --- - -- We do not hard-code IP addresses in the code - -- We do not hard-code FQDNs in the code, either - -- We just connect to a service name, and container-magic does the rest - - (And by container-magic, we mean "a crafty, dynamic, embedded DNS server") - ---- - -## Example in `worker/worker.py` - -```python -redis = Redis("`redis`") - - -def get_random_bytes(): - r = requests.get("http://`rng`/32") - return r.content - - -def hash_bytes(data): - r = requests.post("http://`hasher`/", - data=data, - headers={"Content-Type": "application/octet-stream"}) -``` - -(Full source code available [here]( -https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17 -)) - ---- - -class: extra-details - -## Links, naming, and service discovery - -- Containers can have network aliases (resolvable through DNS) - -- Compose file version 2+ makes each container reachable through its service name - -- Compose file version 1 required "links" sections to accomplish this - -- Network aliases are automatically namespaced - - - you can have multiple apps declaring and using a service named `database` - - - containers in the blue app will resolve `database` to the IP of the blue database - - - containers in the green app will resolve `database` to the IP of the green database - ---- - -## Show me the code! - -- You can check the GitHub repository with all the materials of this workshop: -
https://@@GITREPO@@ - -- The application is in the [dockercoins]( - https://@@GITREPO@@/tree/master/dockercoins) - subdirectory - -- The Compose file ([docker-compose.yml]( - https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)) - lists all 5 services - -- `redis` is using an official image from the Docker Hub - -- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile - -- Each service's Dockerfile and source code is in its own directory - - (`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory, - `rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/) - directory, etc.) diff --git a/slides/pks/scalingdockercoins.md b/slides/pks/scalingdockercoins.md deleted file mode 100644 index 527058c8e..000000000 --- a/slides/pks/scalingdockercoins.md +++ /dev/null @@ -1,241 +0,0 @@ -# Scaling our demo app - -- Our ultimate goal is to get more DockerCoins - - (i.e. increase the number of loops per second shown on the web UI) - -- Let's look at the architecture again: - - ![DockerCoins architecture](images/dockercoins-diagram.svg) - -- The loop is done in the worker; - perhaps we could try adding more workers? - ---- - -## Adding another worker - -- All we have to do is scale the `worker` Deployment - -.exercise[ - -- Open two new terminals to check what's going on with pods and deployments: - ```bash - kubectl get pods -w - kubectl get deployments -w - ``` - - - -- Now, create more `worker` replicas: - ```bash - kubectl scale deployment worker --replicas=2 - ``` - -] - -After a few seconds, the graph in the web UI should show up. - ---- - -## Adding more workers - -- If 2 workers give us 2x speed, what about 3 workers? - -.exercise[ - -- Scale the `worker` Deployment further: - ```bash - kubectl scale deployment worker --replicas=3 - ``` - -] - -The graph in the web UI should go up again. - -(This is looking great! We're gonna be RICH!) - ---- - -## Adding even more workers - -- Let's see if 10 workers give us 10x speed! - -.exercise[ - -- Scale the `worker` Deployment to a bigger number: - ```bash - kubectl scale deployment worker --replicas=10 - ``` - -] - --- - -The graph will peak at 10 hashes/second. - -(We can add as many workers as we want: we will never go past 10 hashes/second.) - ---- - -class: extra-details - -## Didn't we briefly exceed 10 hashes/second? - -- It may *look like it*, because the web UI shows instant speed - -- The instant speed can briefly exceed 10 hashes/second - -- The average speed cannot - -- The instant speed can be biased because of how it's computed - ---- - -class: extra-details - -## Why instant speed is misleading - -- The instant speed is computed client-side by the web UI - -- The web UI checks the hash counter once per second -
- (and does a classic (h2-h1)/(t2-t1) speed computation) - -- The counter is updated once per second by the workers - -- These timings are not exact -
- (e.g. the web UI check interval is client-side JavaScript) - -- Sometimes, between two web UI counter measurements, -
- the workers are able to update the counter *twice* - -- During that cycle, the instant speed will appear to be much bigger -
- (but it will be compensated by lower instant speed before and after) - ---- - -## Why are we stuck at 10 hashes per second? - -- If this was high-quality, production code, we would have instrumentation - - (Datadog, Honeycomb, New Relic, statsd, Sumologic, ...) - -- It's not! - -- Perhaps we could benchmark our web services? - - (with tools like `ab`, or even simpler, `httping`) - ---- - -## Benchmarking our web services - -- We want to check `hasher` and `rng` - -- We are going to use `httping` - -- It's just like `ping`, but using HTTP `GET` requests - - (it measures how long it takes to perform one `GET` request) - -- It's used like this: - ``` - httping [-c count] http://host:port/path - ``` - -- Or even simpler: - ``` - httping ip.ad.dr.ess - ``` - -- We will use `httping` on the ClusterIP addresses of our services - ---- - -## Running a debug pod - -We don't have direct access to ClusterIP services, nor do we want to run a bunch of port-forwards. Instead we can run a Pod containing `httping` and then use `kubectl exec` to perform our debugging. - -.excercise[ - -- Run a debug pod - ```bash - kubectl run debug --image=paulczar/debug \ - --restart=Never -- sleep 6000 - ``` - -] - --- - -This will run our debug pod which contains tools like `httping` that will self-destruct after 6000 seconds. - ---- - -### Executing a command in a running pod - -- You may have need to occasionally run a command inside a pod. Rather than trying to run `SSH` inside a container you can use the `kubectl exec` command. - -.excercise[ - - - Run curl inside your debug pod: - ```bash - kubectl exec debug -- curl -s https://google.com - ``` -] - --- - -```html - -301 Moved -

-<H1>301 Moved</H1>
-The document has moved
here. - -``` - ---- - -## Service Discovery - -- Each of our services has a Cluster IP which we could get using `kubectl get services` - -- Or do it programmatically, like so: - ```bash - HASHER=$(kubectl get svc hasher -o go-template={{.spec.clusterIP}}) - RNG=$(kubectl get svc rng -o go-template={{.spec.clusterIP}}) - ``` - -- However Kubernetes has an in-cluster DNS server which means if you're inside the cluster you can simple use the service name as an endpoint. - ---- - -## Checking `hasher` and `rng` response times - -.exercise[ - -- Check the response times for both services: - ```bash - kubectl exec debug -- httping -c 3 hasher - kubectl exec debug -- httping -c 3 rng - ``` - -] - --- - -- `hasher` is fine (it should take a few milliseconds to reply) - -- `rng` is not (it should take about 700 milliseconds if there are 10 workers) - -- Something is wrong with `rng`, but ... what? diff --git a/slides/pks/security-kubectl-apply.md b/slides/pks/security-kubectl-apply.md deleted file mode 100644 index 9be59165d..000000000 --- a/slides/pks/security-kubectl-apply.md +++ /dev/null @@ -1,52 +0,0 @@ - -# Security implications of `kubectl apply` - -- When we do `kubectl apply -f `, we create arbitrary resources - -- Resources can be evil; imagine a `deployment` that ... - --- - - - starts bitcoin miners on the whole cluster - --- - - - hides in a non-default namespace - --- - - - bind-mounts our nodes' filesystem - --- - - - inserts SSH keys in the root account (on the node) - --- - - - encrypts our data and ransoms it - --- - - - ☠️☠️☠️ - ---- - -## `kubectl apply` is the new `curl | sh` - -- `curl | sh` is convenient - -- It's safe if you use HTTPS URLs from trusted sources - --- - -- `kubectl apply -f` is convenient - -- It's safe if you use HTTPS URLs from trusted sources - -- Example: the official setup instructions for most pod networks - --- - -- It introduces new failure modes - - (for instance, if you try to apply YAML from a link that's no longer valid) diff --git a/slides/pks/setup-k8s.md b/slides/pks/setup-k8s.md deleted file mode 100644 index 8a6f5b875..000000000 --- a/slides/pks/setup-k8s.md +++ /dev/null @@ -1,108 +0,0 @@ -# Setting up Kubernetes - -How did we set up these Kubernetes clusters that we're using? - --- - -- We used Pivotal Container Service (PKS) a multicloud Kubernetes broker. - -- But first we Created a GKE Kubernetes cluster - - We installed the Google Cloud Operator on GKE - - We installed PKS using the GCP Operator - - We installed this Kubernetes cluster using PKS - ---- - -# Setting up Kubernetes - -- How can I set up a basic Kubernetes lab at home? - --- - - - -- Run `kubeadm` on freshly installed VM instances running Ubuntu LTS - - 1. Install Docker - - 2. Install Kubernetes packages - - 3. Run `kubeadm init` on the first node (it deploys the control plane on that node) - - 4. Set up Weave (the overlay network) -
- (that step is just one `kubectl apply` command; discussed later) - - 5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`) - - 6. Copy the configuration file generated by `kubeadm init` - -- Check the [prepare VMs README](https://@@GITREPO@@/blob/master/prepare-vms/README.md) for more details - ---- - -## `kubeadm` drawbacks - -- Doesn't set up Docker or any other container engine - -- Doesn't set up the overlay network - -- Doesn't set up multi-master (no high availability) - --- - - (At least ... not yet! Though it's [experimental in 1.12](https://kubernetes.io/docs/setup/independent/high-availability/).) - --- - -- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme - ---- - -## Other deployment options - -- [AKS](https://azure.microsoft.com/services/kubernetes-service/): - managed Kubernetes on Azure - -- [GKE](https://cloud.google.com/kubernetes-engine/): - managed Kubernetes on Google Cloud - -- [EKS](https://aws.amazon.com/eks/), - [eksctl](https://eksctl.io/): - managed Kubernetes on AWS - -- [kops](https://github.com/kubernetes/kops): - customizable deployments on AWS, Digital Ocean, GCE (beta), vSphere (alpha) - -- [minikube](https://kubernetes.io/docs/setup/minikube/), - [kubespawn](https://github.com/kinvolk/kube-spawn), - [Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/): - for local development - -- [kubicorn](https://github.com/kubicorn/kubicorn), - the [Cluster API](https://blogs.vmware.com/cloudnative/2019/03/14/what-and-why-of-cluster-api/): - deploy your clusters declaratively, "the Kubernetes way" - ---- - -## Even more deployment options - -- If you like Ansible: - [kubespray](https://github.com/kubernetes-incubator/kubespray) - -- If you like Terraform: - [typhoon](https://github.com/poseidon/typhoon) - -- If you like Terraform and Puppet: - [tarmak](https://github.com/jetstack/tarmak) - -- You can also learn how to install every component manually, with - the excellent tutorial [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) - - *Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.* - -- There are also many commercial options available! - -- For a longer list, check the Kubernetes documentation: -
- it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes. diff --git a/slides/pks/title.md b/slides/pks/title.md deleted file mode 100644 index 568203a94..000000000 --- a/slides/pks/title.md +++ /dev/null @@ -1,23 +0,0 @@ -class: title, self-paced - -@@TITLE@@ - -.nav[*Self-paced version*] - ---- - -class: title, in-person - -@@TITLE@@

- -.footnote[ -**Be kind to the WiFi!**
- -*Don't use your hotspot.*
-*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*
-*Thank you!* - -**Slides: @@SLIDES@@**
-**Credentials: https://tinyurl.com/k8scamp**
-**Login: https://gangway.workshop.demo.paulczar.wtf** -] diff --git a/slides/pks/wp/values.yaml b/slides/pks/wp/values.yaml deleted file mode 100644 index cbc37988b..000000000 --- a/slides/pks/wp/values.yaml +++ /dev/null @@ -1,18 +0,0 @@ -service: - type: ClusterIP -persistence: - enabled: false -mariadb: - master: - persistence: - enabled: false -ingress: - enabled: true - certManager: true - hosts: - - name: user1-wp.ingress.workshop.paulczar.wtf - path: / - tls: - - hosts: - - user1-wp.ingress.workshop.paulczar.wtf - secretName: wordpress-tls \ No newline at end of file diff --git a/slides/spring-one-tour.yml.no b/slides/spring-one-tour.yml.no deleted file mode 100644 index 1e06d9f6d..000000000 --- a/slides/spring-one-tour.yml.no +++ /dev/null @@ -1,62 +0,0 @@ -title: | - Spring One Tour - Kubernetes Workshop - - -#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)" -#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)" -chat: "In person!" - -gitrepo: github.com/paulczar/container.training -gitbranch: - -slides: http://k8s.camp/s1t/ - -#slidenumberprefix: "#SomeHashTag — " - -exclude: -- self-paced - -chapters: -- pks/title.md -- pks/logistics.md -- k8s/intro.md -- shared/about-slides.md -- shared/toc.md - -- - - pks/prereqs.md - - pks/connecting.md - - pks/concepts-k8s.md - - - shared/declarative.md - - k8s/declarative.md - - pks/dashboard.md - - pks/octant.md - - pks/kubectlget.md -- - pks/kubectlrun.md - - k8s/deploymentslideshow.md - - pks/kubectlexpose.md - - #- k8s/shippingimages.md - #- k8s/buildshiprun-selfhosted.md - # - k8s/buildshiprun-dockerhub.md - # - pks/sampleapp.md - - pks/httpenv-update.md - # - pks/ourapponkube.md - - -# - - k8s/logs-cli.md - # - pks/logs-centralized.md - # - k8s/namespaces.md - - pks/helm-intro.md - #- k8s/helm-chart-format.md - - k8s/helm-create-basic-chart.md - #- k8s/helm-create-better-chart.md - #- k8s/helm-secrets.md - #- k8s/kustomize.md - #- k8s/netpol.md - - k8s/whatsnext.md -# - k8s/links.md -# Bridget-specific - - k8s/links-bridget.md - - shared/thankyou.md From 602e0c5d1646575875a8d8392bbd457bc3ddc55d Mon Sep 17 00:00:00 2001 From: Paul Czarkowski Date: Tue, 5 May 2020 11:56:22 -0500 Subject: [PATCH 14/14] move hosts and logistics details into manifests Signed-off-by: Paul Czarkowski --- slides/kube-fullday-namespaced.yml | 33 ++++++++++++++++++++++-------- slides/logistics-template.md | 10 +++------ slides/markmaker.py | 21 +++++++++++++++++++ 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/slides/kube-fullday-namespaced.yml b/slides/kube-fullday-namespaced.yml index b4ee2a557..1a5947a1b 100644 --- a/slides/kube-fullday-namespaced.yml +++ b/slides/kube-fullday-namespaced.yml @@ -10,6 +10,23 @@ gitrepo: github.com/jpetazzo/container.training slides: http://container.training/ +hosts: + - name: "Ann O'Nymous" + emoji: "👩🏻‍🏫" + company: "Megacorp Inc" + twitter: "annonypotomus" + - name: "Stu Dent" + emoji: "👨🏾‍🎓" + company: "University of Wakanda" + twitter: "stufromwakanda" + +logistics: |- + - logistics: + - The workshop will run from 9:00am + - There will be a lunch break at 12:00pm + - Plus coffee breaks :) + + #slidenumberprefix: "#SomeHashTag — " exclude: @@ -75,14 +92,14 @@ content: # TODO - Update to show nginx or generic Ingress vs Traefik specific. 
# - k8s/ingress.md #- k8s/kustomize.md - #- k8s/helm-intro.md - #- k8s/helm-chart-format.md - #- k8s/helm-create-basic-chart.md - #- k8s/helm-create-better-chart.md - #- k8s/helm-secrets.md - #- k8s/exercise-helm.md - #- k8s/create-chart.md - #- k8s/create-more-charts.md + # - k8s/helm-intro.md + # - k8s/helm-chart-format.md + # - k8s/helm-create-basic-chart.md + # - k8s/helm-create-better-chart.md + # - k8s/helm-secrets.md + # - k8s/exercise-helm.md + # - k8s/create-chart.md + # - k8s/create-more-charts.md #- k8s/netpol.md #- k8s/authn-authz.md #- k8s/csr-api.md diff --git a/slides/logistics-template.md b/slides/logistics-template.md index 33388a069..2d03bd089 100644 --- a/slides/logistics-template.md +++ b/slides/logistics-template.md @@ -3,13 +3,13 @@ - This slide should be customized by the tutorial instructor(s). - Hello! We are: +@@HOSTS@@ + -- The workshop will run from ... - -- There will be a lunch break at ... - - (And coffee breaks!) +@@LOGISTICS@@ - Feel free to interrupt for questions at any time diff --git a/slides/markmaker.py b/slides/markmaker.py index 0ab314ffa..1135d91ef 100755 --- a/slides/markmaker.py +++ b/slides/markmaker.py @@ -98,6 +98,11 @@ def generatefromyaml(manifest, filename): if "html" not in manifest: manifest["html"] = filename + ".html" + if "logistics" not in manifest: + logistics = "\n- The workshop will run from ...\n- There will be a lunch break at ...\n- Plus coffee breaks!" + logging.warning("logistics not found, using default - {}".format(logistics)) + manifest["logistics"] = logistics + markdown, titles = processcontent(manifest["content"], filename) logging.debug("Found {} titles.".format(len(titles))) toc = gentoc(titles) @@ -111,6 +116,20 @@ def generatefromyaml(manifest, filename): logging.warning("'exclude' is empty.") exclude = ",".join('"{}"'.format(c) for c in exclude) + hosts = manifest.get("hosts", []) + logging.debug("hosts={!r}".format(hosts)) + if not hosts: + logging.warning("'hosts' is empty. Using defaults.") + host_html = "" + else: + host_html = "
    \n" + for host in flatten(hosts): + logging.debug("host: {}".format(host["name"])) + # host_html += "
  • {}\n".format(host["name"]) + host_html += "
  • {} {} <a href=\"https://twitter.com/{}\">@{}</a>, {}\n".format(host["emoji"],host["name"],host["twitter"],host["twitter"],host["company"])
+        host_html += "<br/>
\n" + logging.debug("host_html: {}".format(host_html)) + # Insert build info. This is super hackish. markdown = markdown.replace( @@ -123,6 +142,8 @@ def generatefromyaml(manifest, filename): html = open("workshop.html").read() html = html.replace("@@MARKDOWN@@", markdown) html = html.replace("@@EXCLUDE@@", exclude) + html = html.replace("@@HOSTS@@", host_html) + html = html.replace("@@LOGISTICS@@", manifest["logistics"]) html = html.replace("@@CHAT@@", manifest["chat"]) html = html.replace("@@GITREPO@@", manifest["gitrepo"]) html = html.replace("@@SLIDES@@", manifest["slides"])