More tests
alex-zaitsev committed Nov 29, 2024
1 parent 5389882 commit 58b484e
Showing 7 changed files with 140 additions and 51 deletions.
30 changes: 18 additions & 12 deletions docs/chk-examples/clickhouse-keeper-1-node-for-test-only.yaml
@@ -7,7 +7,6 @@ spec:
templates:
podTemplate: clickhouse-keeper
volumeClaimTemplate: data-volume
clusterServiceTemplate: zookeeper
configuration:
clusters:
- name: test
@@ -31,14 +30,21 @@ spec:
resources:
requests:
storage: 100Mi
serviceTemplates: # Service drop-in replacement in tests
- name: zookeeper
generateName: zookeeper
spec:
ports:
- port: 2181
name: client
- port: 7000
name: prometheus
type: ClusterIP
clusterIP: None
---
# Fake Service used as a drop-in replacement in tests
apiVersion: v1
kind: Service
metadata:
name: zookeeper
labels:
clickhouse-keeper.altinity.com/app: chop
clickhouse-keeper.altinity.com/chk: clickhouse-keeper
spec:
ports:
- port: 2181
name: client
- port: 7000
name: prometheus
selector:
clickhouse-keeper.altinity.com/chk: clickhouse-keeper
clickhouse-keeper.altinity.com/ready: "yes"
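
Note: this change drops the serviceTemplates block (and the clusterServiceTemplate reference) from the CHK resource and instead bundles a plain Service named zookeeper as a second document in the same manifest, selecting only keeper pods the operator has labeled ready. A minimal sketch, not part of this commit, of how a test could verify the drop-in Service; it assumes the e2e kubectl helper module with a get(kind, name) accessor, as used in test_operator.py below:

# Sketch: verify the fake "zookeeper" Service targets only ready keeper pods.
# Assumes import e2e.kubectl as kubectl (the helper module the e2e tests use)
# and that get() returns the parsed resource as a dict.
def check_fake_zookeeper_service():
    svc = kubectl.get("service", "zookeeper")
    # Routing only to pods marked ready avoids handing clients a keeper
    # replica that is still bootstrapping.
    selector = svc["spec"]["selector"]
    assert selector["clickhouse-keeper.altinity.com/ready"] == "yes"
    # The client port must match what ClickHouse manifests expect (2181).
    ports = [(p["port"], p["name"]) for p in svc["spec"]["ports"]]
    assert (2181, "client") in ports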
30 changes: 18 additions & 12 deletions docs/chk-examples/clickhouse-keeper-3-node-for-test-only.yaml
@@ -7,7 +7,6 @@ spec:
templates:
podTemplate: clickhouse-keeper
volumeClaimTemplate: data-volume
clusterServiceTemplate: zookeeper
configuration:
clusters:
- name: test
@@ -31,14 +30,21 @@ spec:
resources:
requests:
storage: 100Mi
serviceTemplates: # Service drop-in replacement in tests
- name: zookeeper
generateName: zookeeper
spec:
ports:
- port: 2181
name: client
- port: 7000
name: prometheus
type: ClusterIP
clusterIP: None
---
# Fake Service used as a drop-in replacement in tests
apiVersion: v1
kind: Service
metadata:
name: zookeeper
labels:
clickhouse-keeper.altinity.com/app: chop
clickhouse-keeper.altinity.com/chk: clickhouse-keeper
spec:
ports:
- port: 2181
name: client
- port: 7000
name: prometheus
selector:
clickhouse-keeper.altinity.com/chk: clickhouse-keeper
clickhouse-keeper.altinity.com/ready: "yes"
4 changes: 2 additions & 2 deletions tests/e2e/manifests/chi/test-010-zkroot.yaml
@@ -8,13 +8,13 @@ spec:
useTemplates:
- name: clickhouse-version
defaults:
templates:
templates:
logVolumeClaimTemplate: default
configuration:
zookeeper: # Add Zookeeper
nodes:
- host: zookeeper
# port: 2181
port: 2181
root: "/clickhouse/test-010-zkroot"
session_timeout_ms: 30000
operation_timeout_ms: 10000
30 changes: 30 additions & 0 deletions tests/e2e/manifests/chi/test-014-0-replication-2-1.yaml
@@ -0,0 +1,30 @@
apiVersion: "clickhouse.altinity.com/v1"

kind: "ClickHouseInstallation"

metadata:
name: test-014-replication

spec:
# reconciling:
# policy: wait
useTemplates:
- name: clickhouse-version
- name: persistent-volume
configuration:
zookeeper:
nodes:
- host: zookeeper
port: 2181
session_timeout_ms: 5000
operation_timeout_ms: 5000
root: /test/root/path
clusters:
- name: default
layout:
replicasCount: 2
shardsCount: 1
profiles:
default/database_atomic_wait_for_drop_and_detach_synchronously: 1
default/allow_experimental_live_view: 1
default/allow_experimental_database_replicated: 1
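
Note: this new manifest shrinks test-014 from two shards to a 1-shard, 2-replica layout; the test changes below then look for leftover table metadata under the removed shard's ZooKeeper path. A small illustrative helper, not in the commit, showing the path convention those queries rely on:

# Sketch: the per-shard ZooKeeper path that the test_operator.py queries
# below are built from ("/clickhouse/{cluster}/tables/{shard}/{database}").
def zk_tables_path(cluster: str, shard: int, database: str = "default") -> str:
    return f"/clickhouse/{cluster}/tables/{shard}/{database}"

# After scaling from 2 shards to 1, shard index 1 should hold no tables:
assert zk_tables_path("default", 1) == "/clickhouse/default/tables/1/default"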
92 changes: 70 additions & 22 deletions tests/e2e/test_operator.py
@@ -603,7 +603,6 @@ def test_009_2(self, version_from="0.23.7", version_to=None):
def test_010(self):
create_shell_namespace_clickhouse_template()

util.set_operator_version(current().context.operator_version)
util.require_keeper(keeper_type=self.context.keeper_type)

kubectl.create_and_check(
@@ -624,6 +623,41 @@ def test_010(self):
with Finally("I clean up"):
delete_test_namespace()

@TestScenario
@Name("test_010_1. Test zookeeper initialization AFTER starting a cluster")
def test_010_1(self):
create_shell_namespace_clickhouse_template()
chi = "test-010-zkroot"

kubectl.create_and_check(
manifest="manifests/chi/test-010-zkroot.yaml",
check={
"apply_templates": {
current().context.clickhouse_template,
},
"do_not_delete": 1,
"chi_status": "InProgress"
},
)

with Then("Wait 60 seconds for operator to start creating ZooKeeper root"):
time.sleep(60)

# with Then("CHI should be in progress with no pods created yet"):
# assert kubectl.get_chi_status(chi) == "InProgress"
# assert kubectl.get_count("pod", chi = chi) == 0

util.require_keeper(keeper_type=self.context.keeper_type)

kubectl.wait_chi_status(chi, "Completed")

with And("ClickHouse should not complain regarding zookeeper path"):
out = clickhouse.query_with_error("test-010-zkroot", "select path from system.zookeeper where path = '/' limit 1")
assert "/" == out

with Finally("I clean up"):
delete_test_namespace()


def get_user_xml_from_configmap(chi, user):
users_xml = kubectl.get("configmap", f"chi-{chi}-common-usersd")["data"]["chop-generated-users.xml"]
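
Note on test_010_1 above: it waits a fixed 60 seconds before expecting the operator to have started creating the ZooKeeper root. A hedged sketch of a polling alternative, not part of this commit, built on the kubectl.get_chi_status helper referenced in the commented-out assertions:

# Sketch: poll the CHI status instead of sleeping a fixed 60 seconds,
# which is less timing-sensitive on slow CI runners. Assumes the e2e
# kubectl helper module and its get_chi_status(chi) accessor.
import time

def wait_chi_in_progress(chi, timeout=60, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if kubectl.get_chi_status(chi) == "InProgress":
            return
        time.sleep(interval)
    raise AssertionError(f"CHI {chi} did not enter InProgress within {timeout}s")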
@@ -1565,9 +1599,7 @@ def check_schema_propagation(replicas):
kubectl.launch(f"delete pod {self.context.keeper_type}-0")
time.sleep(1)

with Then(
f"try insert into the table while {self.context.keeper_type} offline table should be in readonly mode"
):
with Then(f"try insert into the table while {self.context.keeper_type} offline table should be in readonly mode"):
out = clickhouse.query_with_error(chi_name, "SET insert_keeper_max_retries=0; INSERT INTO test_local_014 VALUES(2)")
assert "Table is in readonly mode" in out

@@ -1597,28 +1629,44 @@ def check_schema_propagation(replicas):
time.sleep(10)
check_schema_propagation([1])

with When("Remove shard"):
manifest = "manifests/chi/test-014-0-replication-2-1.yaml"
chi_name = yaml_manifest.get_name(util.get_full_path(manifest))
kubectl.create_and_check(
manifest=manifest,
check={
"pod_count": 2,
"do_not_delete": 1,
},
timeout=600,
)
with Then("Shard should be deleted in ZooKeeper"):
out = clickhouse.query_with_error(
chi_name,
f"SELECT count() FROM system.zookeeper WHERE path ='/clickhouse/{cluster}/tables/1/default'",
)
note(f"Found {out} replicated tables in {self.context.keeper_type}")
# FIXME: it fails
# assert "DB::Exception: No node" in out or out == "0"

with When("Delete chi"):
kubectl.delete_chi("test-014-replication")

with Then(
f"Tables should be deleted in {self.context.keeper_type}. We can test it re-creating the chi and checking {self.context.keeper_type} contents"
):
manifest = "manifests/chi/test-014-0-replication-1.yaml"
kubectl.create_and_check(
manifest=manifest,
check={
"pod_count": 2,
"pdb": {"default": 1},
"do_not_delete": 1,
},
manifest = "manifests/chi/test-014-0-replication-1.yaml"
kubectl.create_and_check(
manifest=manifest,
check={
"pod_count": 2,
"do_not_delete": 1,
},
)
with Then("Tables are deleted in ZooKeeper"):
out = clickhouse.query_with_error(
chi_name,
f"SELECT count() FROM system.zookeeper WHERE path ='/clickhouse/{cluster}/tables/0/default'",
)
with Then("Tables are deleted in ZooKeeper"):
out = clickhouse.query_with_error(
chi_name,
f"SELECT count() FROM system.zookeeper WHERE path ='/clickhouse/{chi_name}/tables/0/default'",
)
note(f"Found {out} replicated tables in {self.context.keeper_type}")
assert "DB::Exception: No node" in out or out == "0"
note(f"Found {out} replicated tables in {self.context.keeper_type}")
assert "DB::Exception: No node" in out or out == "0"

with Finally("I clean up"):
delete_test_namespace()
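
Note on the FIXME in the "Remove shard" step above: asserting that the removed shard's metadata is gone immediately after reconciliation can race with asynchronous ZooKeeper cleanup. A sketch, not in this commit, of a polling assertion reusing the same clickhouse.query_with_error call the test already makes; the timeout values are assumptions:

# Sketch: poll until the removed shard's ZooKeeper subtree disappears,
# instead of asserting on a single query that may run before cleanup ends.
import time

def wait_zk_path_deleted(chi_name, path, timeout=120, interval=10):
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = clickhouse.query_with_error(
            chi_name,
            f"SELECT count() FROM system.zookeeper WHERE path ='{path}'",
        )
        # Same success condition the test asserts: node gone or count zero.
        if "DB::Exception: No node" in out or out == "0":
            return
        time.sleep(interval)
    raise AssertionError(f"{path} still exists in ZooKeeper after {timeout}s")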
4 changes: 2 additions & 2 deletions tests/e2e/util.py
@@ -85,7 +85,7 @@ def require_keeper(keeper_manifest="", keeper_type=settings.keeper_type, force_i
keeper_manifest = f"../../deploy/clickhouse-keeper/clickhouse-keeper-manually/{keeper_manifest}"
if keeper_type == "chk":
keeper_manifest = (
"clickhouse-keeper-1-node-for-test.yaml" if keeper_manifest == "" else keeper_manifest
"clickhouse-keeper-1-node-for-test-only.yaml" if keeper_manifest == "" else keeper_manifest
)
keeper_manifest = f"../../deploy/clickhouse-keeper/clickhouse-keeper-with-CHK-resource/{keeper_manifest}"
if keeper_type == "zookeeper-operator":
@@ -102,7 +102,7 @@ def require_keeper(keeper_manifest="", keeper_type=settings.keeper_type, force_i
expected_docs = {
"zookeeper": 5 if "scaleout-pvc" in keeper_manifest else 4,
"clickhouse-keeper": 7,
"chk": 1,
"chk": 2,
"zookeeper-operator": 3 if "probes" in keeper_manifest else 1,
}
expected_pod_prefix = {
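
Note: expected_docs counts the YAML documents in each keeper manifest, and "chk" rises from 1 to 2 because the CHK example manifests above now bundle the fake zookeeper Service as a second document. An illustrative way to derive that count, assuming PyYAML is available in the test environment; not part of this commit:

# Sketch: count YAML documents in a manifest, matching what expected_docs
# encodes. The "if doc is not None" guard skips empty trailing documents
# left by a terminating "---" separator.
import yaml

def count_yaml_docs(path: str) -> int:
    with open(path) as f:
        return sum(1 for doc in yaml.safe_load_all(f) if doc is not None)

# The CHK test manifest now holds the CHK resource plus the fake Service,
# so count_yaml_docs(...clickhouse-keeper-1-node-for-test-only.yaml) == 2.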
1 change: 0 additions & 1 deletion tests/regression.py
@@ -8,7 +8,6 @@
xfails = {
# test_operator.py
"/regression/e2e.test_operator/test_008*": [(Fail, "Test 008 sometimes fails due to unknown reasons")],
"/regression/e2e.test_operator/test_030:": [(Fail, "FIXME: Test 030 started to fail in 0.24.1")],
"/regression/e2e.test_operator/test_032:": [(Fail, "Test 032 sometimes fails due to unknown reasons")],
# test_clickhouse.py
"/regression/e2e.test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")],
