Commit 0eb3535

Merge pull request #69 from Altinity/release-0.2beta
Release 0.2.0-beta
alex-zaitsev authored Apr 15, 2019
2 parents 6050a7d + 7c9db8f commit 0eb3535
Showing 63 changed files with 1,972 additions and 782 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -15,4 +15,4 @@ FROM alpine:3.8
 RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/*
 WORKDIR /
 COPY --from=builder /tmp/clickhouse-operator .
-ENTRYPOINT ["./clickhouse-operator"]
+ENTRYPOINT ["./clickhouse-operator", "-alsologtostderr=true", "-v=1"]
17 changes: 12 additions & 5 deletions README.md
@@ -29,12 +29,17 @@ The ClickHouse Operator for Kubernetes currently provides the following:
 
 **Advanced setups**
 * [Detailed Operator Installation Instructions][detailed]
-* [ClickHouse Installation Custom Resource specification][crd_explained]
-* [How to setup ClickHouse cluster with replication][replication_setup]
-* [Zookeeper Setup][zookeeper_setup]
+* [Operator Configuration][operator_configuration]
+* [Setup ClickHouse cluster with replication][replication_setup]
+* [Setting up Zookeeper][zookeeper_setup]
 * [Persistent Storage Configuration][storage]
-* [Update ClickHouseInstallation with **Rolling Update** - add replication to existing cluster][update_cluster_add_replication]
-* [Update ClickHouseInstallation with **Rolling Update** - update ClickHouse version][update_clickhouse_version]
+* [ClickHouse Installation Custom Resource specification][crd_explained]
+
+**Maintenance tasks**
+* [Adding replication to an existing ClickHouse cluster][update_cluster_add_replication]
+* Adding shards and replicas
+* [Automatic schema creation][schema_migration]
+* [Update ClickHouse version][update_clickhouse_version]
 
 **Monitoring**
 * [Prometheus & clickhouse-operator integration][prometheus_setup]

@@ -60,3 +65,5 @@ See [LICENSE](./LICENSE) for more details.
 [storage]: ./docs/storage.md
 [update_cluster_add_replication]: ./docs/chi_update_add_replication.md
 [update_clickhouse_version]: ./docs/chi_update_clickhouse_version.md
+[schema_migration]: ./docs/schema_migration.md
+[operator_configuration]: ./docs/operator_configuration.md
9 changes: 9 additions & 0 deletions build_image_dev.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Builds a dev docker image.
+# Alter the image tag in the deployment spec in the dev environment in order to use it.
+
+cat Dockerfile | envsubst | docker build -t sunsingerus/clickhouse-operator:dev .
+
+docker login -u sunsingerus
+docker push sunsingerus/clickhouse-operator:dev
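
Since the script pipes the Dockerfile through `envsubst`, any `${VAR}` references in the Dockerfile are expanded from the caller's environment. A usage sketch; the variable name below is hypothetical and stands in for whatever the Dockerfile actually references:

```bash
# Hypothetical: export the variables the Dockerfile references
# before invoking the build script, so envsubst can expand them.
export CLICKHOUSE_OPERATOR_TAG=dev   # hypothetical variable name
./build_image_dev.sh
```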
2 changes: 1 addition & 1 deletion cmd/clickhouse-operator/app/clickhouse_operator.go
@@ -42,7 +42,7 @@ import (
 )
 
 // Version defines current build version
-const Version = "0.1.7beta"
+const Version = "0.2.0-beta"
 
 // Prometheus exporter defaults
 const (
35 changes: 32 additions & 3 deletions config/config.yaml
@@ -1,12 +1,41 @@
 # Namespaces where clickhouse-operator listens for events.
 # Concurrently running operators should listen on different namespaces
 namespaces:
   - dev
   - info
   - onemore
 
-commonConfigsPath: config.d
-deploymentConfigsPath: conf.d
-usersConfigsPath: users.d
+###########################################
+##
+## Additional Configuration Files Section
+##
+###########################################
+
+# Path to the folder with ClickHouse configuration files common to all instances within a CHI.
+chCommonConfigsPath: config.d
+
+# Path to the folder with ClickHouse configuration files unique to each instance within a CHI.
+chDeploymentConfigsPath: conf.d
+
+# Path to the folder with ClickHouse configuration files that hold users settings.
+# Files are common to all instances within a CHI.
+chUsersConfigsPath: users.d
+
+# Path to the folder where ClickHouseInstallation .yaml manifests are located.
+# Manifests are applied in sorted alpha-numeric order.
+chiTemplatesPath: templates.d
+
+###########################################
+##
+## Cluster Update Section
+##
+###########################################
+
+# How many seconds to wait for a created/updated StatefulSet to be Ready
+statefulSetUpdateTimeout: 50
+
+# How many seconds to wait between checks of a created/updated StatefulSet's status
+statefulSetUpdatePollPeriod: 2
+
+# What to do if a created/updated StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
+onStatefulSetUpdateFailureAction: abort
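
Taken together, the path settings above imply a config folder layout like the following sketch; the directory names are the defaults from this file, while the tree itself is illustrative:

```bash
# Illustrative layout matching the default paths configured above:
#
# config/
# ├── config.yaml    # this operator config file
# ├── config.d/      # chCommonConfigsPath: ClickHouse config common to all instances in a CHI
# ├── conf.d/        # chDeploymentConfigsPath: per-instance ClickHouse config
# ├── users.d/       # chUsersConfigsPath: users settings, common to all instances
# └── templates.d/   # chiTemplatesPath: CHI manifests, applied in sorted alpha-numeric order
ls -R config/
```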
51 changes: 51 additions & 0 deletions config/templates.d/t1.yaml
@@ -0,0 +1,51 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "template1"
+spec:
+  defaults:
+    replicasUseFQDN: 0 # 0 - by default, 1 - enabled
+    distributedDDL:
+      profile: default
+    deployment:
+      zone:
+        matchLabels:
+          clickhouse.altinity.com/zone: zone1
+      podTemplate: clickhouse-v18.16.1
+      volumeClaimTemplate: default
+  configuration:
+    zookeeper:
+      nodes:
+        - host: zk-statefulset-0.zk-service.default.svc.cluster.local
+          port: 2181
+        - host: zk-statefulset-1.zk-service.default.svc.cluster.local
+          port: 2181
+        - host: zk-statefulset-2.zk-service.default.svc.cluster.local
+          port: 2181
+    users:
+      test/profile: default
+      test/quotas: default
+      readonly/profile: readonly
+    profiles:
+      readonly/readonly: "1"
+      default/max_memory_usage: "1000000000"
+    quotas:
+      default/interval/duration: "3600"
+    settings:
+      compression/case/method: zstd
+  templates:
+    podTemplates:
+      - name: clickhouse-v18.16.1
+        containers:
+          - name: clickhouse
+            image: yandex/clickhouse-server:18.16.1
+            volumeMounts:
+              - name: clickhouse-data-test
+                mountPath: /var/lib/clickhouse
+      - name: clickhouse-v18.16.2
+        containers:
+          - name: clickhouse
+            image: yandex/clickhouse-server:18.16.2
+            volumeMounts:
+              - name: clickhouse-data-test
+                mountPath: /var/lib/clickhouse
21 changes: 21 additions & 0 deletions config/templates.d/t2.yaml
@@ -0,0 +1,21 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "template2"
+spec:
+  templates:
+    podTemplates:
+      - name: clickhouse-v19.4.3.11
+        containers:
+          - name: clickhouse
+            image: yandex/clickhouse-server:19.4.3.11
+            volumeMounts:
+              - name: clickhouse-data-test
+                mountPath: /var/lib/clickhouse
+      - name: clickhouse-v19.3.9.12
+        containers:
+          - name: clickhouse
+            image: yandex/clickhouse-server:19.3.9.12
+            volumeMounts:
+              - name: clickhouse-data-test
+                mountPath: /var/lib/clickhouse
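
The pod templates in `templates.d` are meant to be referenced by name, the same way `t1.yaml` references `clickhouse-v18.16.1` in its own `deployment` section. A minimal sketch, assuming templates loaded from `templates.d` can be referenced from a CHI manifest; the CHI name and cluster name below are hypothetical:

```bash
# Hypothetical CHI that points its deployment at a pod template from t2.yaml.
kubectl -n dev apply -f - <<EOF
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
  name: "demo-from-template"             # hypothetical name
spec:
  defaults:
    deployment:
      podTemplate: clickhouse-v19.4.3.11 # defined in templates.d/t2.yaml
  configuration:
    clusters:
      - name: "demo"                     # hypothetical cluster name
        layout:
          type: Standard
          shardsCount: 1
          replicasCount: 1
EOF
```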
20 changes: 16 additions & 4 deletions docs/chi_update_add_replication.md
@@ -22,9 +22,16 @@ deployment.apps/clickhouse-operator 1/1 1 1 17s
 NAME                                          DESIRED CURRENT READY AGE
 replicaset.apps/clickhouse-operator-5cbc47484 1       1       1     17s
 ```
-Now let's install ClickHouse from provided examples. Manifest file with initial position is [07-rolling-update-01-initial-position.yaml](./examples/07-rolling-update-01-initial-position.yaml):
+Now let's install ClickHouse from the provided examples.
+There are two **rolling update** examples presented:
+1. Simple stateless cluster: [initial position](./examples/07-rolling-update-stateless-01-initial-position.yaml) and [update](./examples/07-rolling-update-stateless-02-apply-update.yaml)
+1. Stateful cluster with Persistent Volumes: [initial position](./examples/09-rolling-update-stateful-01-initial-position.yaml) and [update](./examples/09-rolling-update-stateful-02-apply-update.yaml)
+
+## Simple Rolling Update Example
+
+Let's start with the simple stateless cluster. The manifest file with the initial position is [07-rolling-update-stateless-01-initial-position.yaml](./examples/07-rolling-update-stateless-01-initial-position.yaml):
 ```bash
-kubectl -n dev apply -f 07-rolling-update-01-initial-position.yaml
+kubectl -n dev apply -f 07-rolling-update-stateless-01-initial-position.yaml
 ```
 Check the initial position. We should have the cluster up and running:
 ```bash

@@ -75,9 +82,9 @@ All is well.
 Let's run the update and change the `.yaml` manifest so we'll have replication available.
 
 In order to have replication correctly set up, we need to specify `Zookeeper` (which is assumed to be running already) and specify replicas for ClickHouse.
-Manifest file with updates specified is [07-rolling-update-02-apply-update.yaml](./examples/07-rolling-update-02-apply-update.yaml):
+The manifest file with the updates specified is [07-rolling-update-stateless-02-apply-update.yaml](./examples/07-rolling-update-stateless-02-apply-update.yaml):
 ```bash
-kubectl -n dev apply -f 07-rolling-update-02-apply-update.yaml
+kubectl -n dev apply -f 07-rolling-update-stateless-02-apply-update.yaml
 ```
 And let's watch how the update rolls over:
 ```text

@@ -131,3 +138,8 @@ root@chi-d02eaa-347e-0-0-0:/# cat /etc/clickhouse-server/config.d/zookeeper.xml
 </distributed_ddl>
 </yandex>
 ```
+
+## Rolling Update with State Example
+The stateful cluster with Persistent Volumes example is presented as [initial position](./examples/09-rolling-update-stateful-01-initial-position.yaml) and [update](./examples/09-rolling-update-stateful-02-apply-update.yaml).
+The structure is the same as in the simple example, but Persistent Volumes are used, so this example is best run on a cloud provider with dynamic volume provisioning available.
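
To observe a rolling update as it happens, it helps to watch the pods from a second terminal; a small sketch, using namespace `dev` as in the walkthrough above:

```bash
# Watch pods cycle through Terminating/ContainerCreating/Running
# as the operator rolls the StatefulSets over one replica at a time.
kubectl -n dev get pods -o wide --watch
```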
4 changes: 2 additions & 2 deletions docs/examples/01-standard-layout-01-1shard-1repl.yaml
@@ -1,11 +1,11 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "example-01"
+  name: "standard-01"
 spec:
   configuration:
     clusters:
-      - name: "example-01-1shard-1repl"
+      - name: "standard-01-1shard-1repl"
         layout:
           type: Standard
           shardsCount: 1
4 changes: 2 additions & 2 deletions docs/examples/01-standard-layout-02-1shard-2repl.yaml
@@ -1,11 +1,11 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "example-03"
+  name: "standard-02"
 spec:
   configuration:
     clusters:
-      - name: "example-03-1shard-2repl"
+      - name: "standard-02-1shard-2repl"
         layout:
           type: Standard
           replicasCount: 2
4 changes: 2 additions & 2 deletions docs/examples/01-standard-layout-03-2shard-1repl.yaml
@@ -1,11 +1,11 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "example-02"
+  name: "standard-03"
 spec:
   configuration:
     clusters:
-      - name: "example-02-2shard-1repl"
+      - name: "standard-03-2shard-1repl"
         layout:
           type: Standard
           shardsCount: 2
8 changes: 4 additions & 4 deletions docs/examples/01-standard-layout-04-multiple-clusters.yaml
@@ -1,20 +1,20 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "example-04"
+  name: "standard-04"
 spec:
   configuration:
     clusters:
-      - name: "example-04-not-sharded-replicated"
+      - name: "standard-04-not-sharded-replicated"
         layout:
           type: Standard
           shardsCount: 1
           replicasCount: 2
-      - name: "example-04-sharded-not-replicated"
+      - name: "standard-04-sharded-not-replicated"
         layout:
           type: Standard
           shardsCount: 2
-      - name: "example-04-sharded-replicated"
+      - name: "standard-04-sharded-replicated"
         layout:
           type: Standard
           replicasCount: 2
@@ -0,0 +1,27 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "standard-01-simple-pv"
+spec:
+  configuration:
+    clusters:
+      - name: "standard-01-simple-pv"
+        layout:
+          type: Standard
+          shardsCount: 1
+          replicasCount: 1
+  defaults:
+    deployment:
+      volumeClaimTemplate: volumeclaim-template
+  templates:
+    volumeClaimTemplates:
+      - name: volumeclaim-template
+        persistentVolumeClaim:
+          metadata:
+            name: USE_DEFAULT_NAME
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 500Mi
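
Once a manifest like this is applied, each replica should get a PersistentVolumeClaim generated from `volumeclaim-template`. A quick check, assuming the manifest was applied to namespace `dev`:

```bash
# List the PVCs the operator created from the volume claim template;
# each should show as Bound once dynamic provisioning completes.
kubectl -n dev get pvc
```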
@@ -0,0 +1,45 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "standard-02-deployment-pv"
+spec:
+  configuration:
+    clusters:
+      - name: "standard-02-deployment-pv"
+        # Deployment is specified for this cluster explicitly
+        deployment:
+          podTemplate: pod-template-with-volume
+          volumeClaimTemplate: storage-vc-template
+        layout:
+          type: Standard
+          shardsCount: 1
+          replicasCount: 1
+
+  templates:
+    podTemplates:
+      - name: pod-template-with-volume
+        containers:
+          - name: clickhouse
+            image: yandex/clickhouse-server:19.3.7
+            ports:
+              - name: http
+                containerPort: 8123
+              - name: client
+                containerPort: 9000
+              - name: interserver
+                containerPort: 9009
+            volumeMounts:
+              - name: clickhouse-data-storage
+                mountPath: /var/lib/clickhouse
+
+    volumeClaimTemplates:
+      - name: storage-vc-template
+        persistentVolumeClaim:
+          metadata:
+            name: clickhouse-data-storage
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: 1Gi
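
Because the pod template exposes the standard ClickHouse ports (8123 HTTP, 9000 native client, 9009 interserver), one way to verify the deployment is to port-forward to a pod and issue a query over the HTTP interface; the pod name below is hypothetical:

```bash
# Forward the HTTP port of one ClickHouse pod (pod name is hypothetical)
kubectl -n dev port-forward pod/chi-example-0-0-0 8123:8123 &
# Issue a trivial query through the ClickHouse HTTP interface
curl 'http://localhost:8123/?query=SELECT%201'
```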