From a4c37d47a45d3dc5621e0fe3a4ad90011c2d10c3 Mon Sep 17 00:00:00 2001
From: nicholasyang
Date: Fri, 13 Sep 2024 17:57:36 +0800
Subject: [PATCH] Dev: behave: remove unused functional tests

---
 .github/workflows/crmsh-ci.yml                    | 16 --------
 codecov.yml                                       |  4 +-
 data-manifest                                     |  1 -
 test/features/bootstrap_bugs.feature              | 32 +---------------
 .../bootstrap_init_join_remove.feature            | 18 ---------
 test/features/healthcheck.feature                 | 37 -------------------
 6 files changed, 3 insertions(+), 105 deletions(-)
 delete mode 100644 test/features/healthcheck.feature

diff --git a/.github/workflows/crmsh-ci.yml b/.github/workflows/crmsh-ci.yml
index 4b78e14a44..3757c1fda4 100644
--- a/.github/workflows/crmsh-ci.yml
+++ b/.github/workflows/crmsh-ci.yml
@@ -357,22 +357,6 @@ jobs:
         token: ${{ secrets.CODECOV_TOKEN }}
         flags: integration
 
-  functional_test_healthcheck:
-    runs-on: ubuntu-20.04
-    timeout-minutes: 40
-    steps:
-    - uses: actions/checkout@v4
-    - name: functional test for healthcheck
-      run: |
-        echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json
-        sudo systemctl restart docker.service
-        index=`$GET_INDEX_OF healthcheck`
-        $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u
-    - uses: codecov/codecov-action@v4
-      with:
-        token: ${{ secrets.CODECOV_TOKEN }}
-        flags: integration
-
   functional_test_cluster_api:
     runs-on: ubuntu-20.04
     timeout-minutes: 40
diff --git a/codecov.yml b/codecov.yml
index 520acb9383..e45cf0f1f4 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -8,7 +8,7 @@ coverage:
         threshold: 0.1%
 codecov:
   notify:
-    after_n_builds: 26
+    after_n_builds: 25
 comment:
-  after_n_builds: 26
+  after_n_builds: 25
   layout: "condensed_header, flags, files, condensed_footer"
diff --git a/data-manifest b/data-manifest
index 2b0c6b2129..5363434938 100644
--- a/data-manifest
+++ b/data-manifest
@@ -78,7 +78,6 @@ test/features/coveragerc
 test/features/crm_report_bugs.feature
 test/features/environment.py
 test/features/geo_setup.feature
-test/features/healthcheck.feature
 test/features/ocfs2.feature
 test/features/qdevice_options.feature
 test/features/qdevice_setup_remove.feature
diff --git a/test/features/bootstrap_bugs.feature b/test/features/bootstrap_bugs.feature
index 3f252b6e0c..7c62ea875d 100644
--- a/test/features/bootstrap_bugs.feature
+++ b/test/features/bootstrap_bugs.feature
@@ -183,20 +183,10 @@ Feature: Regression test for bootstrap bugs
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
     When    Run "crm cluster init -y" on "hanode1"
+    And     Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
-    When    Run "rm -f /root/.ssh/id_rsa.pub" on "hanode1"
-    When    Run "rm -f /root/.ssh/id_rsa.pub" on "hanode2"
-    When    Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
-    When    Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
-    When    Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
-    And     Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
-    And     Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
-    And     Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
-    And     Run "crm status" on "hanode1"
-    Then    Check user shell for hacluster between "hanode1 hanode2"
-    Then    Check passwordless for hacluster between "hanode1 hanode2"
 
   @skip_non_root
   @clean
@@ -229,23 +219,3 @@ Feature: Regression test for bootstrap bugs
     And     Expected "hacluster:haclient" in stdout
     And     Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
     And     Expected "hacluster:haclient" in stdout
-    # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys exists
-    When    Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode1"
-    And     Run "chown root:root ~hacluster/.ssh/authorized_keys && chmod 0600 ~hacluster/.ssh/authorized_keys" on "hanode2"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
-    And     Run "crm status" on "hanode1"
-    Then    Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
-    And     Expected "hacluster:haclient" in stdout
-    Then    Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
-    And     Expected "hacluster:haclient" in stdout
-    # in an upgraded cluster in which ~hacluster/.ssh/authorized_keys does not exist
-    When    Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode1"
-    And     Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh/" on "hanode2"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
-    And     Run "crm status" on "hanode1"
-    Then    Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode1"
-    And     Expected "hacluster:haclient" in stdout
-    Then    Run "stat -c '%U:%G' ~hacluster/.ssh/authorized_keys" OK on "hanode2"
-    And     Expected "hacluster:haclient" in stdout
diff --git a/test/features/bootstrap_init_join_remove.feature b/test/features/bootstrap_init_join_remove.feature
index 4a1e924dd9..9338c1ec4b 100644
--- a/test/features/bootstrap_init_join_remove.feature
+++ b/test/features/bootstrap_init_join_remove.feature
@@ -183,21 +183,3 @@ Feature: crmsh bootstrap process - init, join and remove
     Then    Cluster service is "started" on "hanode3"
     And     Online nodes are "hanode1 hanode2 hanode3"
     And     Check passwordless for hacluster between "hanode1 hanode2 hanode3"
-
-  Scenario: Check hacluster's user shell
-    Given   Cluster service is "stopped" on "hanode3"
-    When    Run "crm cluster join -c hanode1 -y" on "hanode3"
-    Then    Cluster service is "started" on "hanode3"
-    And     Online nodes are "hanode1 hanode2 hanode3"
-    When    Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode1"
-    And     Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode2"
-    And     Run "rm -rf /var/lib/heartbeat/cores/hacluster/.ssh" on "hanode3"
-    And     Run "usermod -s /usr/sbin/nologin hacluster" on "hanode1"
-    And     Run "usermod -s /usr/sbin/nologin hacluster" on "hanode2"
-    And     Run "usermod -s /usr/sbin/nologin hacluster" on "hanode3"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode2"
-    And     Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode3"
-    And     Run "crm status" on "hanode1"
-    Then    Check user shell for hacluster between "hanode1 hanode2 hanode3"
-    Then    Check passwordless for hacluster between "hanode1 hanode2 hanode3"
diff --git a/test/features/healthcheck.feature b/test/features/healthcheck.feature
deleted file mode 100644
index da7f78ac3e..0000000000
--- a/test/features/healthcheck.feature
+++ /dev/null
@@ -1,37 +0,0 @@
-@healthcheck
-Feature: healthcheck detect and fix problems in a crmsh deployment
-
-  Tag @clean means need to stop cluster service if the service is available
-  Need nodes: hanode1 hanode2 hanode3
-
-  Background: Setup a two nodes cluster
-    Given   Cluster service is "stopped" on "hanode1"
-    And     Cluster service is "stopped" on "hanode2"
-    And     Cluster service is "stopped" on "hanode3"
-    When    Run "crm cluster init -y" on "hanode1"
-    Then    Cluster service is "started" on "hanode1"
-    And     Show cluster status on "hanode1"
-    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
-    Then    Cluster service is "started" on "hanode2"
-    And     Online nodes are "hanode1 hanode2"
-    And     Show cluster status on "hanode1"
-
-  @clean
-  Scenario: a new node joins when directory ~hacluster/.ssh is removed from cluster
-    When    Run "rm -rf ~hacluster/.ssh" on "hanode1"
-    And     Run "rm -rf ~hacluster/.ssh" on "hanode2"
-    And     Run "crm cluster join -c hanode1 -y" on "hanode3"
-    Then    Cluster service is "started" on "hanode3"
-    # FIXME: new join implement does not trigger a exception any longer, and the auto fix is not applied
-    # And     File "~hacluster/.ssh/id_rsa" exists on "hanode1"
-    # And     File "~hacluster/.ssh/id_rsa" exists on "hanode2"
-    # And     File "~hacluster/.ssh/id_rsa" exists on "hanode3"
-
-  # skip non-root as behave_agent is not able to run commands interactively with non-root sudoer
-  @skip_non_root
-  @clean
-  Scenario: An upgrade_seq file in ~hacluster/crmsh/ will be migrated to /var/lib/crmsh (bsc#1213050)
-    When    Run "mv /var/lib/crmsh ~hacluster/" on "hanode1"
-    Then    File "~hacluster/crmsh/upgrade_seq" exists on "hanode1"
-    When    Run "crm cluster status" on "hanode1"
-    Then    File "/var/lib/crmsh/upgrade_seq" exists on "hanode1"