From fa4ec6674d1fc59731c57dc5ed9d3c64716b4061 Mon Sep 17 00:00:00 2001 From: zuohanxu Date: Tue, 18 Apr 2023 21:11:08 +0800 Subject: [PATCH 1/2] avocado/utils/ssh.py: Add a timeout parameter to allow a running command to be stopped quickly. Signed-off-by: zuohanxu --- avocado/utils/ssh.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/avocado/utils/ssh.py b/avocado/utils/ssh.py index 0e9c63b256..3f068042c4 100644 --- a/avocado/utils/ssh.py +++ b/avocado/utils/ssh.py @@ -205,7 +205,7 @@ def get_raw_ssh_command(self, command): """ return self._ssh_cmd(self.DEFAULT_OPTIONS, ("-q",), command) - def cmd(self, command, ignore_status=True): + def cmd(self, command, ignore_status=True, timeout=None): """ Runs a command over the SSH session @@ -217,12 +217,17 @@ def cmd(self, command, ignore_status=True): in case of either the command or ssh connection returned with exit status other than zero. :type ignore_status: bool + :param timeout: Maximum time, in seconds, allowed for the command + to complete; ``None`` (the default) means no time limit. + :type timeout: float :returns: The command result object. :rtype: A :class:`avocado.utils.process.CmdResult` instance. 
""" try: return process.run( - self.get_raw_ssh_command(command), ignore_status=ignore_status + self.get_raw_ssh_command(command), + ignore_status=ignore_status, + timeout=timeout, ) except process.CmdError as exc: if exc.result.exit_status == 255: From 0866c13982534b490ffb19abd339c33bc34e0008 Mon Sep 17 00:00:00 2001 From: zuohanxu Date: Wed, 19 Jul 2023 20:56:23 +0800 Subject: [PATCH 2/2] Resolved some issues where execution could not be voluntarily stopped Signed-off-by: zuohanxu --- .github/workflows/ci.yml | 58 +++- .github/workflows/pr_announcement.yml | 52 +++ .github/workflows/prerelease.yml | 2 +- .github/workflows/release.yml | 4 +- .github/workflows/weekly.yml | 2 - .packit.yml | 6 +- .pylintrc | 9 +- Makefile | 19 +- Makefile.gh | 4 +- VERSION | 2 +- avocado/core/__init__.py | 8 +- avocado/core/dispatcher.py | 73 ++++ avocado/core/exceptions.py | 64 +--- avocado/core/extension_manager.py | 4 +- avocado/core/job.py | 58 ++-- avocado/core/main.py | 27 +- avocado/core/messages.py | 17 +- avocado/core/output.py | 256 ++++++-------- avocado/core/resolver.py | 9 +- avocado/core/safeloader/core.py | 62 +++- avocado/core/spawners/common.py | 8 +- avocado/core/streams.py | 9 +- avocado/core/task/runtime.py | 73 +++- avocado/core/task/statemachine.py | 106 ++++-- avocado/core/test.py | 20 +- avocado/core/utils/loader.py | 73 ++-- avocado/core/utils/messages.py | 68 ++-- avocado/plugins/assets.py | 27 +- avocado/plugins/diff.py | 8 +- avocado/plugins/plugins.py | 73 +--- avocado/plugins/runner_nrunner.py | 5 +- avocado/plugins/runners/asset.py | 5 +- .../plugins/runners/avocado_instrumented.py | 17 +- avocado/plugins/runners/exec_test.py | 9 + avocado/plugins/runners/package.py | 8 +- avocado/plugins/runners/podman_image.py | 5 +- avocado/plugins/runners/python_unittest.py | 2 + avocado/plugins/runners/sysinfo.py | 3 + avocado/plugins/runners/tap.py | 4 + avocado/plugins/spawners/lxc.py | 311 +++++++++++++++++ avocado/plugins/spawners/process.py | 2 + 
avocado/plugins/sysinfo.py | 90 ++++- avocado/utils/build.py | 2 +- avocado/utils/cloudinit.py | 4 +- avocado/utils/cpu.py | 12 +- avocado/utils/data_structures.py | 38 +++ avocado/utils/disk.py | 52 ++- avocado/utils/distro.py | 4 +- avocado/utils/download.py | 36 +- avocado/utils/memory.py | 16 +- avocado/utils/multipath.py | 44 +++ avocado/utils/network/hosts.py | 2 +- avocado/utils/network/interfaces.py | 71 +++- avocado/utils/nvme.py | 317 +++++++++++++++++- avocado/utils/process.py | 11 +- avocado/utils/software_manager/main.py | 7 + avocado/utils/ssh.py | 8 +- avocado/utils/wait.py | 7 +- .../selftests/check-copr-rpm-version.docker | 2 +- .../{fedora-36.docker => fedora-38.docker} | 5 +- docs/source/blueprints/BP005.rst | 2 +- .../guides/contributor/chapters/how.rst | 20 +- .../guides/contributor/chapters/rfc.rst | 4 +- .../guides/contributor/chapters/tips.rst | 6 +- .../guides/user/chapters/dependencies.rst | 9 + .../guides/user/chapters/introduction.rst | 8 +- docs/source/guides/user/chapters/logging.rst | 26 +- .../source/guides/writer/chapters/logging.rst | 249 ++++++++++---- .../source/guides/writer/chapters/writing.rst | 24 +- docs/source/plugins/index.rst | 31 +- docs/source/plugins/optional/golang.rst | 87 +---- docs/source/plugins/optional/multiplexer.rst | 193 ----------- docs/source/plugins/optional/results.rst | 134 -------- docs/source/plugins/optional/results/html.rst | 1 + .../source/plugins/optional/results/index.rst | 20 ++ .../optional/results/result_upload.rst | 1 + .../plugins/optional/results/resultsdb.rst | 1 + docs/source/plugins/optional/robot.rst | 39 +-- .../plugins/optional/varianters/cit.rst | 1 + .../plugins/optional/varianters/index.rst | 13 + .../plugins/optional/varianters/pict.rst | 1 + .../optional/varianters/yaml_to_mux.rst | 1 + docs/source/plugins/standard/teststmpdir.rst | 31 ++ docs/source/releases/102_0.rst | 123 +++++++ docs/source/releases/57_0.rst | 2 +- docs/source/releases/70_0.rst | 2 +- 
docs/source/releases/98_0.rst | 2 +- docs/source/releases/99_0.rst | 4 +- docs/source/releases/index.rst | 1 + examples/tests/env_variables.sh | 11 +- examples/tests/external_logging_stream.py | 19 ++ examples/tests/logging_streams.py | 17 +- examples/tests/test_env.py | 6 +- man/avocado.rst | 2 +- optional_plugins/ansible/VERSION | 2 +- optional_plugins/ansible/setup.py | 2 +- optional_plugins/golang/MANIFEST.in | 2 +- optional_plugins/golang/README | 1 + optional_plugins/golang/README.rst | 86 +++++ optional_plugins/golang/VERSION | 2 +- optional_plugins/golang/setup.py | 17 +- optional_plugins/html/MANIFEST.in | 2 +- optional_plugins/html/README | 1 + optional_plugins/html/README.rst | 28 ++ optional_plugins/html/VERSION | 2 +- .../html/avocado_result_html/__init__.py | 8 +- optional_plugins/html/setup.py | 17 +- optional_plugins/result_upload/MANIFEST.in | 2 +- optional_plugins/result_upload/README | 1 + optional_plugins/result_upload/README.rst | 51 +++ optional_plugins/result_upload/VERSION | 2 +- optional_plugins/result_upload/setup.py | 17 +- optional_plugins/resultsdb/MANIFEST.in | 2 +- optional_plugins/resultsdb/README | 1 + optional_plugins/resultsdb/README.rst | 39 +++ optional_plugins/resultsdb/VERSION | 2 +- optional_plugins/resultsdb/setup.py | 17 +- optional_plugins/robot/MANIFEST.in | 2 +- optional_plugins/robot/README | 1 + optional_plugins/robot/README.rst | 38 +++ optional_plugins/robot/VERSION | 2 +- optional_plugins/robot/setup.py | 17 +- optional_plugins/varianter_cit/MANIFEST.in | 2 +- optional_plugins/varianter_cit/README | 1 + .../varianter_cit/README.rst | 0 optional_plugins/varianter_cit/VERSION | 2 +- optional_plugins/varianter_cit/setup.py | 16 +- optional_plugins/varianter_pict/MANIFEST.in | 2 +- optional_plugins/varianter_pict/README | 1 + .../varianter_pict/README.rst | 0 optional_plugins/varianter_pict/VERSION | 2 +- optional_plugins/varianter_pict/setup.py | 17 +- .../varianter_yaml_to_mux/MANIFEST.in | 2 +- 
optional_plugins/varianter_yaml_to_mux/README | 1 + .../varianter_yaml_to_mux/README.rst | 196 ++++++++++- .../varianter_yaml_to_mux/VERSION | 2 +- .../varianter_yaml_to_mux/setup.py | 17 +- .../tests/.data/mux-selftest.yaml | 2 +- python-avocado.spec | 13 +- requirements-dev.txt | 4 +- .../.data/exec_test_std/exec_test_1mib.py | 2 +- .../.data/exec_test_std/exec_test_64kib.py | 2 +- selftests/.data/test_statuses.py | 3 + selftests/check.py | 5 + selftests/functional/basic.py | 165 ++++++++- selftests/functional/job_api_features.py | 29 ++ selftests/functional/output.py | 16 +- selftests/functional/plugin/bystatus.py | 2 +- .../functional/plugin/runners/exec_test.py | 37 ++ selftests/functional/plugin/spawners/lxc.py | 134 ++++++++ selftests/functional/runner_package.py | 6 + selftests/functional/serial/requirements.py | 44 ++- selftests/functional/streams.py | 77 ++++- selftests/functional/sysinfo.py | 87 +++++ selftests/functional/utils/distro.py | 55 +++ .../tests/check-copr-rpm-version.sh | 2 +- selftests/signedoff-check.sh | 18 +- selftests/unit/datadir.py | 20 +- selftests/unit/job.py | 47 ++- selftests/unit/nrunner.py | 4 + selftests/unit/plugin/assets.py | 20 +- selftests/unit/runner_package.py | 6 + selftests/unit/runner_sysinfo.py | 5 + selftests/unit/safeloader_core.py | 29 +- selftests/unit/tags.py | 80 +++++ selftests/unit/task_runtime.py | 4 +- selftests/unit/utils/linux_modules.py | 4 +- selftests/unit/utils/process.py | 4 + setup.py | 7 + spell.ignore | 2 + 170 files changed, 3644 insertions(+), 1307 deletions(-) create mode 100644 .github/workflows/pr_announcement.yml create mode 100644 avocado/plugins/spawners/lxc.py rename contrib/containers/ci/selftests/{fedora-36.docker => fedora-38.docker} (62%) mode change 100644 => 120000 docs/source/plugins/optional/golang.rst delete mode 100644 docs/source/plugins/optional/multiplexer.rst delete mode 100644 docs/source/plugins/optional/results.rst create mode 120000 
docs/source/plugins/optional/results/html.rst create mode 100644 docs/source/plugins/optional/results/index.rst create mode 120000 docs/source/plugins/optional/results/result_upload.rst create mode 120000 docs/source/plugins/optional/results/resultsdb.rst mode change 100644 => 120000 docs/source/plugins/optional/robot.rst create mode 120000 docs/source/plugins/optional/varianters/cit.rst create mode 100644 docs/source/plugins/optional/varianters/index.rst create mode 120000 docs/source/plugins/optional/varianters/pict.rst create mode 120000 docs/source/plugins/optional/varianters/yaml_to_mux.rst create mode 100644 docs/source/plugins/standard/teststmpdir.rst create mode 100644 docs/source/releases/102_0.rst create mode 100644 examples/tests/external_logging_stream.py create mode 120000 optional_plugins/golang/README create mode 100644 optional_plugins/golang/README.rst create mode 120000 optional_plugins/html/README create mode 100644 optional_plugins/html/README.rst create mode 120000 optional_plugins/result_upload/README create mode 100644 optional_plugins/result_upload/README.rst create mode 120000 optional_plugins/resultsdb/README create mode 100644 optional_plugins/resultsdb/README.rst create mode 120000 optional_plugins/robot/README create mode 100644 optional_plugins/robot/README.rst create mode 120000 optional_plugins/varianter_cit/README rename docs/source/plugins/optional/varianter_cit.rst => optional_plugins/varianter_cit/README.rst (100%) create mode 120000 optional_plugins/varianter_pict/README rename docs/source/plugins/optional/varianter_pict.rst => optional_plugins/varianter_pict/README.rst (100%) create mode 120000 optional_plugins/varianter_yaml_to_mux/README rename docs/source/plugins/optional/varianter_yaml_to_mux.rst => optional_plugins/varianter_yaml_to_mux/README.rst (74%) create mode 100644 selftests/functional/plugin/runners/exec_test.py create mode 100644 selftests/functional/plugin/spawners/lxc.py create mode 100644 
selftests/functional/utils/distro.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4566197f8..5c32ab5db4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ jobs: name: Static checks runs-on: ubuntu-20.04 container: - image: quay.io/avocado-framework/avocado-ci-fedora-36 + image: quay.io/avocado-framework/avocado-ci-fedora-38 steps: - run: echo "Job triggered by a ${{ github.event_name }} event on branch is ${{ github.ref }} in repository is ${{ github.repository }}, runner on ${{ runner.os }}" @@ -18,6 +18,7 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} - name: Allow git to operate on directory checked out by GH Actions run: git config --global --add safe.directory `pwd` - name: Installing Avocado development dependencies @@ -25,6 +26,8 @@ jobs: - name: Installing Avocado in develop mode run: python3 setup.py develop --user - name: Run static checks + env: + COMMIT_COUNT: ${{ github.event.pull_request.commits }} run: python3 setup.py test --select=static-checks - name: Archive failed tests logs if: failure() @@ -35,10 +38,34 @@ jobs: retention-days: 1 - run: echo "🥑 This job's status is ${{ job.status }}." 
- full-smokecheck-linux: + smokecheck-linux: + + name: Smokecheck on Linux with Python ${{ matrix.python-version }} + runs-on: ubuntu-20.04 + + strategy: + matrix: + python-version: [3.7, 3.8, 3.9, 3.10.0, 3.11] + fail-fast: false + + steps: + - run: echo "Job triggered by a ${{ github.event_name }} event on branch is ${{ github.ref }} in repository is ${{ github.repository }}, runner on ${{ runner.os }}" + - name: Check out repository code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Avocado smokecheck + run: make smokecheck + + check-linux: name: Linux with Python ${{ matrix.python-version }} runs-on: ubuntu-20.04 + needs: smokecheck-linux strategy: matrix: @@ -63,8 +90,6 @@ jobs: run: python3 setup.py develop --user - name: Avocado version run: avocado --version - - name: Avocado smoketest - run: python -m avocado run examples/tests/passtest.py - name: Unittests and fast functional tests run: python3 setup.py test --skip=static-checks - name: Archive failed tests logs @@ -109,11 +134,11 @@ jobs: - run: echo "🥑 This job's status is ${{ job.status }}." -# OS X smokecheck on latest Python +# macOS check on latest Python - smokecheck-osx: + check-macos: - name: OS X with Python ${{ matrix.python-version }} + name: macOS with Python ${{ matrix.python-version }} runs-on: macos-12 strategy: @@ -141,7 +166,7 @@ jobs: - name: List test run: python -m avocado --verbose list selftests/unit/* selftests/functional/* selftests/*sh - name: Run a subset of avocado's selftests - run: PATH=~/Library/Python/3.11/bin:$PATH ./selftests/check.py --select=nrunner-interface,job-api,jobs,optional-plugins + run: PATH=~/Library/Python/3.11/bin:$PATH ./selftests/check.py --skip=static-checks - run: echo "🥑 This job's status is ${{ job.status }}." 
@@ -238,7 +263,6 @@ jobs: fetch-depth: 0 - name: Run Codespell Check run: make -f Makefile.gh codespell - continue-on-error: True - name: Run bandit check run: make -f Makefile.gh bandit continue-on-error: True @@ -257,10 +281,10 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - container: ["fedora:35", - "fedora:36", - "registry.access.redhat.com/ubi8/ubi", - "registry.access.redhat.com/ubi9-beta/ubi", + container: ["fedora:37", + "fedora:38", + "registry.access.redhat.com/ubi8/ubi:8.8", + "registry.access.redhat.com/ubi9/ubi:9.2", "debian:10.10", "debian:11.0", "ubuntu:21.10", @@ -286,10 +310,10 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - container: ["fedora:35", - "fedora:36", - "registry.access.redhat.com/ubi8/ubi", - "registry.access.redhat.com/ubi9-beta/ubi", + container: ["fedora:37", + "fedora:38", + "registry.access.redhat.com/ubi8/ubi:8.8", + "registry.access.redhat.com/ubi9/ubi:9.2", "debian:10.10", "debian:11.0", "ubuntu:21.10", diff --git a/.github/workflows/pr_announcement.yml b/.github/workflows/pr_announcement.yml new file mode 100644 index 0000000000..706519ef48 --- /dev/null +++ b/.github/workflows/pr_announcement.yml @@ -0,0 +1,52 @@ +name: PR announcement + +on: + pull_request_target: + types: + - opened + +jobs: + comment-to-pr: + name: Do an announcement to PR + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: tibdex/github-app-token@021a2405c7f990db57f5eae5397423dcc554159c + with: + app_id: ${{ secrets.MR_AVOCADO_ID }} + installation_id: ${{ secrets.MR_AVOCADO_INSTALLATION_ID }} + private_key: ${{ secrets.MR_AVOCADO_PRIVATE_KEY }} + - name: Get PR ID + env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + run: | + pr_data=$(gh api graphql -f query='query { + repository(owner:"avocado-framework", name:"avocado") { + pullRequest(number:${{ github.event.number }}) { + id + } + } + }') + echo 'PR_ID='$(echo $pr_data | jq .data.repository.pullRequest.id) >> $GITHUB_ENV + - name: comment on PR 
+ env: + GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} + run: | + gh api graphql -f query='mutation { + addComment(input: { + subjectId: ${{ env.PR_ID }}, + body: """Dear contributor, + Avocado is currently under sprint #103, which is due to release an LTS (Long Term Stability) release. + Please avoid merging changes that do not fall into these categories: + * Bug fixes + * Usability Improvements + * Documentation updates + + As for the Avocado utility modules (“avocado.utils”) it is OK to introduce new functionality, + but changes to the existing APIs (including interface and behavior) should be avoided. + These kinds of changes should wait until sprint #104."""}) { + + clientMutationId + } + }' diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index 5531f75093..3ad4813d09 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -68,7 +68,7 @@ jobs: name: Avocado deployment runs-on: ubuntu-latest container: - image: fedora:36 + image: fedora:38 env: GIT_URL: 'https://github.com/avocado-framework/avocado' INVENTORY: 'selftests/deployment/inventory' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d78b49ba1a..818c72ac60 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -100,12 +100,12 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Build eggs - run: python setup.py bdist_egg + run: make -f Makefile.gh build-egg - name: Upload binaries to release uses: svenstaro/upload-release-action@v2 with: repo_token: ${{ secrets.RELEASE_TOKEN }} - file: ${{ github.workspace }}/dist/avocado_framework* + file: ${{ github.workspace }}/EGG_UPLOAD/avocado_framework*egg tag: ${{ github.event.inputs.version }} overwrite: true file_glob: true diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 89d2246830..c8773ce209 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -43,7 +43,6 @@ 
jobs: run: python -m avocado run examples/tests/passtest.py - name: Tree static check, unittests and fast functional tests run: | - export AVOCADO_LOG_DEBUG="yes" export AVOCADO_CHECK_LEVEL="1" python3 selftests/check.py - name: Archive test logs @@ -86,7 +85,6 @@ jobs: run: avocado --version - name: Tree static check, unittests and fast functional tests without plugins run: | - export AVOCADO_LOG_DEBUG="yes" export AVOCADO_CHECK_LEVEL="1" python3 selftests/check.py --disable-plugin-checks golang,html,resultsdb,result_upload,robot,varianter_cit,varianter_pict,varianter_yaml_to_mux - name: Archive test logs diff --git a/.packit.yml b/.packit.yml index 1a54f67734..d62b965796 100644 --- a/.packit.yml +++ b/.packit.yml @@ -11,7 +11,7 @@ jobs: - centos-stream-9 - epel-8 - epel-9 - - fedora-36-aarch64 - - fedora-36-ppc64le - - fedora-36-s390x + - fedora-38-aarch64 + - fedora-38-ppc64le + - fedora-38-s390x enable_net: False diff --git a/.pylintrc b/.pylintrc index 63ba01def4..f1dc582509 100644 --- a/.pylintrc +++ b/.pylintrc @@ -61,7 +61,7 @@ enable=all # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if +# disable everything first and then re-enable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes @@ -120,7 +120,8 @@ disable=C0103, W0212, W0511, W0703, - W0707 + W0707, + W1203 [REPORTS] @@ -540,5 +541,5 @@ valid-metaclass-classmethod-first-arg=cls # Exceptions that will emit a warning when being caught. Defaults to # "BaseException, Exception". 
-overgeneral-exceptions=BaseException, - Exception +overgeneral-exceptions=builtin.BaseException, + builtin.Exception diff --git a/Makefile b/Makefile index 0cfdd4eced..929aa005e8 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,3 @@ -include Makefile.include - -DESTDIR=/ -AVOCADO_DIRNAME=$(shell basename ${PWD}) -AVOCADO_OPTIONAL_PLUGINS=$(shell find ./optional_plugins -maxdepth 1 -mindepth 1 -type d) - - all: @echo @echo "Development related targets:" @@ -27,6 +20,13 @@ all: @echo "pip: Auxiliary target to install pip. (It's not recommended to run this directly)" @echo +include Makefile.include + +DESTDIR=/ +AVOCADO_DIRNAME=$(shell basename ${PWD}) +AVOCADO_OPTIONAL_PLUGINS=$(shell find ./optional_plugins -maxdepth 1 -mindepth 1 -type d) + + clean: $(PYTHON) setup.py clean --all @@ -40,12 +40,11 @@ requirements-dev: pip - $(PYTHON) -m pip install -r requirements-dev.txt $(PYTHON_DEVELOP_ARGS) smokecheck: clean uninstall develop - PYTHON=$(PYTHON) $(PYTHON) -m avocado run passtest.py + $(PYTHON) -m avocado run examples/tests/passtest.py check: clean uninstall develop # Unless manually set, this is equivalent to AVOCADO_CHECK_LEVEL=0 - PYTHON=$(PYTHON) $(PYTHON) selftests/check.py - selftests/check_tmp_dirs + $(PYTHON) selftests/check.py develop: $(PYTHON) setup.py develop $(PYTHON_DEVELOP_ARGS) diff --git a/Makefile.gh b/Makefile.gh index 57d283b6e0..c334952f05 100644 --- a/Makefile.gh +++ b/Makefile.gh @@ -18,8 +18,8 @@ all: @echo codespell: - pip install codespell - codespell --check-filenames --check-hidden --skip ".git,*.js,./selftests/unit/utils/cpu.py.data/*" + pip install codespell==2.2.4 + codespell --check-filenames --check-hidden --skip ".git,*.js,./selftests/unit/utils/cpu.py.data/*,./avocado/utils/pmem.py,./avocado/utils/cpu.py,./avocado/utils/linux_modules.py,./selftests/functional/output.py,./selftests/unit/utils/vmimage.py" bandit: pip install bandit diff --git a/VERSION b/VERSION index f60fc8f322..c27460a441 100644 --- a/VERSION +++ b/VERSION @@ -1 
+1 @@ -101.0 +102.0 diff --git a/avocado/core/__init__.py b/avocado/core/__init__.py index 11679b5d12..166b4aafc4 100644 --- a/avocado/core/__init__.py +++ b/avocado/core/__init__.py @@ -19,15 +19,17 @@ from avocado.core.dispatcher import InitDispatcher from avocado.core.settings import settings as stgs -from avocado.core.streams import BUILTIN_STREAM_SETS, BUILTIN_STREAMS +from avocado.core.streams import BUILTIN_STREAM_SETS, BUILTIN_STREAMS_DESCRIPTION from avocado.core.utils.path import prepend_base_path def register_core_options(): streams = [ - '"%s": %s' % _ for _ in BUILTIN_STREAMS.items() # pylint: disable=C0209 + f'"{stream}": {description}' + for stream, description in BUILTIN_STREAMS_DESCRIPTION.items() ] + [ - '"%s": %s' % _ for _ in BUILTIN_STREAM_SETS.items() # pylint: disable=C0209 + f'"{stream}": {description}' + for stream, description in BUILTIN_STREAM_SETS.items() ] streams = "; ".join(streams) help_msg = ( diff --git a/avocado/core/dispatcher.py b/avocado/core/dispatcher.py index 16d1f037d8..1421a8703f 100644 --- a/avocado/core/dispatcher.py +++ b/avocado/core/dispatcher.py @@ -20,9 +20,38 @@ :class:`avocado.core.settings_dispatcher.SettingsDispatcher` """ +import inspect +import sys + from avocado.core.enabled_extension_manager import EnabledExtensionManager +def get_dispatchers(module_name): + """Returns the classes that implement plugin dispatching + + These should inherit from the *ExtensionManager base classes + and contain suitable descriptions. + + The produced values are tuples that contain the dispatcher class + and two booleans that indicates whether the configuration and job + is needed to instantiate the class. 
+ """ + module = sys.modules[module_name] + for _, klass in inspect.getmembers(module): + if ( + inspect.isclass(klass) + and issubclass(klass, EnabledExtensionManager) + and hasattr(klass, "PLUGIN_DESCRIPTION") + ): + params = list(inspect.signature(klass.__init__).parameters) + if len(params) == 1: + yield (klass, False, False) + elif len(params) == 2 and params[1] == "config": + yield (klass, True, False) + elif len(params) == 3 and params[1] == "config" and params[2] == "job": + yield (klass, True, True) + + class CLIDispatcher(EnabledExtensionManager): """ @@ -32,6 +61,8 @@ class CLIDispatcher(EnabledExtensionManager): 'avocado.plugins.cli' """ + PLUGIN_DESCRIPTION = "Plugins that add new options to commands (cli)" + def __init__(self): super().__init__("avocado.plugins.cli") @@ -45,6 +76,8 @@ class CLICmdDispatcher(EnabledExtensionManager): 'avocado.plugins.cli.cmd' """ + PLUGIN_DESCRIPTION = "Plugins that add new commands (cli.cmd)" + def __init__(self): super().__init__("avocado.plugins.cli.cmd") @@ -58,6 +91,10 @@ class JobPrePostDispatcher(EnabledExtensionManager): 'avocado.plugins.job.prepost' """ + PLUGIN_DESCRIPTION = ( + "Plugins that run before/after the execution of jobs (job.prepost)" + ) + def __init__(self): super().__init__("avocado.plugins.job.prepost") @@ -71,6 +108,8 @@ class TestPreDispatcher(EnabledExtensionManager): 'avocado.plugins.test.pre' """ + PLUGIN_DESCRIPTION = "Plugins that run before the execution of each test (test.pre)" + def __init__(self): super().__init__("avocado.plugins.test.pre") @@ -84,16 +123,28 @@ class TestPostDispatcher(EnabledExtensionManager): 'avocado.plugins.test.post' """ + PLUGIN_DESCRIPTION = "Plugins that run after the execution of each test (test.post)" + def __init__(self): super().__init__("avocado.plugins.test.post") class ResultDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = ( + "Plugins that generate job result in different formats (result)" + ) + def __init__(self): 
super().__init__("avocado.plugins.result") class ResultEventsDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = ( + "Plugins that generate job result based on job/test events (result_events)" + ) + def __init__(self, config): super().__init__( "avocado.plugins.result_events", invoke_kwds={"config": config} @@ -101,6 +152,9 @@ def __init__(self, config): class VarianterDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = "Plugins that generate test variants (varianter)" + def __init__(self): super().__init__("avocado.plugins.varianter") @@ -138,16 +192,27 @@ def map_method_with_return_copy(self, method_name, *args, **kwargs): class SuiteRunnerDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = "Plugins that run test suites on a job (suite.runner)" + def __init__(self): super().__init__("avocado.plugins.suite.runner") class InitDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = "Plugins that always need to be initialized (init)" + def __init__(self): super().__init__("avocado.plugins.init") class SpawnerDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = ( + "Plugins that spawn tasks and know about their status (spawner)" + ) + def __init__(self, config=None, job=None): super().__init__( "avocado.plugins.spawner", invoke_kwds={"job": job, "config": config} @@ -155,10 +220,18 @@ def __init__(self, config=None, job=None): class RunnableRunnerDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = ( + "Plugins that run runnables (under a task and spawner) (runnable.runner)" + ) + def __init__(self): super().__init__("avocado.plugins.runnable.runner") class CacheDispatcher(EnabledExtensionManager): + + PLUGIN_DESCRIPTION = "Plugins that manipulates with avocado cache (cache)" + def __init__(self): super().__init__("avocado.plugins.cache") diff --git a/avocado/core/exceptions.py b/avocado/core/exceptions.py index c5f7d8993a..b4730ddd84 100644 --- a/avocado/core/exceptions.py +++ b/avocado/core/exceptions.py @@ 
-22,12 +22,9 @@ class JobBaseException(Exception): """ The parent of all job exceptions. - You should be never raising this, but just in case, we'll set its - status' as FAIL. + It should never be raised directly. """ - status = "FAIL" - class JobError(JobBaseException): @@ -35,8 +32,6 @@ class JobError(JobBaseException): A generic error happened during a job execution. """ - status = "ERROR" - class JobTestSuiteError(JobBaseException): @@ -44,8 +39,6 @@ class JobTestSuiteError(JobBaseException): Generic error happened during the creation of a job's test suite """ - status = "ERROR" - class JobTestSuiteEmptyError(JobTestSuiteError): @@ -53,8 +46,6 @@ class JobTestSuiteEmptyError(JobTestSuiteError): Error raised when the creation of a test suite results in an empty suite """ - status = "ERROR" - class JobTestSuiteDuplicateNameError(JobTestSuiteError): @@ -62,8 +53,6 @@ class JobTestSuiteDuplicateNameError(JobTestSuiteError): Error raised when a test suite name is not unique in a job """ - status = "ERROR" - class JobTestSuiteReferenceResolutionError(JobTestSuiteError): @@ -71,7 +60,15 @@ class JobTestSuiteReferenceResolutionError(JobTestSuiteError): Test References did not produce a valid reference by any resolver """ - status = "ERROR" + +class JobFailFast(JobBaseException): + + """ + Indicates that the test has failed because failfast is enabled. + + Should be thrown when a test has failed and failfast is enabled. This will + indicate that other tests will be skipped. + """ class OptionValidationError(Exception): @@ -80,8 +77,6 @@ class OptionValidationError(Exception): An invalid option was passed to the test runner """ - status = "ERROR" - class TestBaseException(Exception): @@ -117,33 +112,6 @@ class TestError(TestBaseException): status = "ERROR" -class TestNotFoundError(TestBaseException): - - """ - Indicates that the test was not found in the test directory. 
- """ - - status = "ERROR" - - -class TestTimeoutInterrupted(TestBaseException): - - """ - Indicates that the test did not finish before the timeout specified. - """ - - status = "INTERRUPTED" - - -class TestInterruptedError(TestBaseException): - - """ - Indicates that the test was interrupted by the user (Ctrl+C) - """ - - status = "INTERRUPTED" - - class TestAbortError(TestBaseException): """ @@ -179,18 +147,6 @@ class TestFail(TestBaseException, AssertionError): status = "FAIL" -class TestFailFast(TestBaseException): - - """ - Indicates that the test has failed because failfast is enabled. - - Should be thrown when a test has failed and failfast is enabled. This will - indicate that other tests will be skipped. - """ - - status = "SKIP" - - class TestWarn(TestBaseException): """ diff --git a/avocado/core/extension_manager.py b/avocado/core/extension_manager.py index 3aff508cc4..b13a7e26a7 100644 --- a/avocado/core/extension_manager.py +++ b/avocado/core/extension_manager.py @@ -189,13 +189,13 @@ def map_method_with_return(self, method_name, *args, **kwargs): except KeyboardInterrupt: raise except: # catch any exception pylint: disable=W0702 - stacktrace.log_exc_info(sys.exc_info(), logger="avocado.app.debug") LOG_UI.error( 'Error running method "%s" of plugin "%s": %s', method_name, ext.name, sys.exc_info()[1], ) + stacktrace.log_exc_info(sys.exc_info(), logger=LOG_UI) return ret def map_method(self, method_name, *args): @@ -215,13 +215,13 @@ def map_method(self, method_name, *args): except KeyboardInterrupt: raise except: # catch any exception pylint: disable=W0702 - stacktrace.log_exc_info(sys.exc_info(), logger="avocado.app.debug") LOG_UI.error( 'Error running method "%s" of plugin "%s": %s', method_name, ext.name, sys.exc_info()[1], ) + stacktrace.log_exc_info(sys.exc_info(), logger=LOG_UI) def __getitem__(self, name): for ext in self.extensions: diff --git a/avocado/core/job.py b/avocado/core/job.py index 51297ca9c5..0d9e862a34 100644 --- 
a/avocado/core/job.py +++ b/avocado/core/job.py @@ -41,7 +41,7 @@ version, ) from avocado.core.job_id import create_unique_job_id -from avocado.core.output import LOG_JOB, LOG_UI, STD_OUTPUT +from avocado.core.output import LOG_JOB, LOG_UI, split_loggers_and_levels from avocado.core.settings import settings from avocado.core.suite import TestSuite, TestSuiteError from avocado.core.utils.version import get_avocado_git_version @@ -84,7 +84,7 @@ def register_job_options(): key="store_logging_stream", nargs="+", help_msg=help_msg, - default=["avocado.core:DEBUG"], + default=[], metavar="STREAM[:LEVEL]", key_type=list, ) @@ -142,7 +142,6 @@ def __init__(self, config=None, test_suites=None): self.config.update(config) self.log = LOG_UI self.loglevel = self.config.get("job.output.loglevel") - self.__logging_handlers = {} if self.config.get("run.dry_run.enabled"): # Modify config for dry-run unique_id = self.config.get("run.unique_job_id") if unique_id is None: @@ -209,38 +208,36 @@ def __exit__(self, _exc_type, _exc_value, _traceback): def __start_job_logging(self): # Enable test logger + full_log = os.path.join(self.logdir, "full.log") fmt = "%(asctime)s %(name)s %(levelname)-5.5s| %(message)s" - test_handler = output.add_log_handler( - LOG_JOB, logging.FileHandler, self.logfile, self.loglevel, fmt + output.add_log_handler( + LOG_JOB, + logging.FileHandler, + self.logfile, + self.loglevel, + fmt, + handler_filter=output.FilterTestMessage(), ) - main_logger = logging.getLogger("avocado") - main_logger.addHandler(test_handler) - main_logger.setLevel(self.loglevel) - self.__logging_handlers[test_handler] = [LOG_JOB.name, ""] - - # Enable console loggers - enabled_logs = self.config.get("core.show") - if "test" in enabled_logs and "early" not in enabled_logs: - self._stdout_stderr = sys.stdout, sys.stderr - # Enable std{out,err} but redirect both to stdout - sys.stdout = STD_OUTPUT.stdout - sys.stderr = STD_OUTPUT.stdout - test_handler = output.add_log_handler( - LOG_JOB, - 
logging.StreamHandler, - STD_OUTPUT.stdout, - logging.DEBUG, - fmt="%(message)s", + output.add_log_handler( + logging.getLogger(""), logging.FileHandler, full_log, self.loglevel, fmt + ) + + # --store-logging-stream files + store_loggers = set(self.config.get("job.run.store_logging_stream")) + for enabled_logger, level in split_loggers_and_levels(store_loggers): + if level: + logfile = os.path.join( + self.logdir, f"{enabled_logger}.{logging.getLevelName(level)}.log" + ) + else: + level = logging.DEBUG + logfile = os.path.join(self.logdir, f"{enabled_logger}.log") + output.add_log_handler( + enabled_logger, logging.FileHandler, logfile, level, fmt ) - main_logger.addHandler(test_handler) - self.__logging_handlers[test_handler] = [LOG_JOB.name, ""] def __stop_job_logging(self): - if self._stdout_stderr: - sys.stdout, sys.stderr = self._stdout_stderr - for handler, loggers in self.__logging_handlers.items(): - for logger in loggers: - logging.getLogger(logger).removeHandler(handler) + output.del_last_configuration() def _log_avocado_config(self): LOG_JOB.info("Avocado config:") @@ -477,7 +474,6 @@ def cleanup(self): """ Cleanup the temporary job handlers (dirs, global setting, ...) 
""" - output.del_last_configuration() self.__stop_job_logging() if not self.__keep_tmpdir and os.path.exists(self.tmpdir): shutil.rmtree(self.tmpdir) diff --git a/avocado/core/main.py b/avocado/core/main.py index 2c4d114f2d..2ec695e970 100755 --- a/avocado/core/main.py +++ b/avocado/core/main.py @@ -12,6 +12,7 @@ # Author: Lucas Meneghel Rodrigues +import multiprocessing import os import sys import tempfile @@ -42,32 +43,32 @@ def get_crash_dir(): def handle_exception(*exc_info): - # Print traceback if AVOCADO_LOG_DEBUG environment variable is set - msg = "Avocado crashed:\n" + "".join(traceback.format_exception(*exc_info)) - msg += "\n" - if os.environ.get("AVOCADO_LOG_DEBUG"): - os.write(2, msg.encode("utf-8")) + tb = "".join(traceback.format_exception(*exc_info)) # Store traceback in data_dir or TMPDIR prefix = "avocado-traceback-" prefix += time.strftime("%F_%T") + "-" tmp, name = tempfile.mkstemp(".log", prefix, get_crash_dir()) - os.write(tmp, msg.encode("utf-8")) + os.write(tmp, tb.encode("utf-8")) os.close(tmp) if exc_info[0] is KeyboardInterrupt: - msg = f"{exc_info[0].__doc__}\nYou can find details in {name}\n" + os.write( + 2, + f"{exc_info[0].__doc__}\nYou can find details in {name}\n".encode("utf-8"), + ) exit_code = exit_codes.AVOCADO_JOB_INTERRUPTED else: - # Print friendly message in console-like output - msg = ( - f"Avocado crashed unexpectedly: {exc_info[1]}\n" - f"You can find details in {name}\n" - ) + # Print friendly message and traceback in console-like output + os.write(2, f"Avocado crashed unexpectedly: {exc_info[1]}\n".encode("utf-8")) + os.write(2, tb.encode("utf-8")) + os.write(2, f"\nYou can also find details in {name}\n".encode("utf-8")) exit_code = exit_codes.AVOCADO_GENERIC_CRASH - os.write(2, msg.encode("utf-8")) sys.exit(exit_code) def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") + sys.excepthook = handle_exception from avocado.core.app import AvocadoApp # pylint: disable=E0611 diff --git 
a/avocado/core/messages.py b/avocado/core/messages.py index 1fec1f59c2..d5cee92aa4 100644 --- a/avocado/core/messages.py +++ b/avocado/core/messages.py @@ -119,10 +119,16 @@ def prepare_metadata(task, job, start_time): """ task_id = TestID.from_identifier(task.identifier) base_path = job.test_results_path - task_path = os.path.join(base_path, task_id.str_filesystem) + task_path = task.runnable.output_dir logfile = os.path.join(task_path, DEFAULT_LOG_FILE) os.makedirs(task_path, exist_ok=True) params = [] + symlink_dir = task.metadata.get("symlink") + if symlink_dir: + os.makedirs( + os.path.abspath(os.path.join(symlink_dir, os.pardir)), exist_ok=True + ) + os.symlink(task_path, symlink_dir, target_is_directory=True) if task.runnable.variant is not None: # convert variant into the list of parameters params = [ @@ -131,7 +137,6 @@ def prepare_metadata(task, job, start_time): for param in params[1] ] - open(logfile, "w", encoding="utf-8").close() metadata = { "job_logdir": job.logdir, "job_unique_id": job.unique_id, @@ -364,10 +369,14 @@ def handle(self, message, task, job): # and levels so that they are handled appropriately based on # the Avocado job logging configuration log_name = message.get("log_name") - if log_name is not None: + if log_name is not None and log_name != "avocado.app": logger = logging.getLogger(log_name) level = logging.getLevelName(message.get("log_levelname")) - logger.log(level, message.get("log_message")) + log_message = f"{task.identifier}: {message.get('log_message')}" + logger_level = logger.level + logger.setLevel(level) + logger.log(level, log_message) + logger.setLevel(logger_level) class StdoutMessageHandler(BaseRunningMessageHandler): diff --git a/avocado/core/output.py b/avocado/core/output.py index 4f39296b40..07240308ac 100644 --- a/avocado/core/output.py +++ b/avocado/core/output.py @@ -30,10 +30,12 @@ #: Handle cases of logging exceptions which will lead to recursion error logging.raiseExceptions = False +#: Pre-defined Avocado 
root logger +LOG_ROOT = logging.getLogger("avocado") #: Pre-defined Avocado human UI logger LOG_UI = logging.getLogger("avocado.app") #: Pre-defined Avocado job/test logger -LOG_JOB = logging.getLogger("avocado.test") +LOG_JOB = logging.getLogger("avocado.job") class TermSupport: @@ -366,9 +368,7 @@ def enable_paginator(self): paginator = Paginator() except RuntimeError as details: # Paginator not available - logging.getLogger("avocado.app.debug").error( - "Failed to enable paginator: %s", details - ) + LOG_UI.error("Failed to enable paginator: %s", details) return self.stdout = self.stderr = paginator self.__configured = True @@ -401,17 +401,13 @@ def early_start(): """ Replace all outputs with in-memory handlers """ - if os.environ.get("AVOCADO_LOG_DEBUG"): - add_log_handler( - LOG_UI.getChild("debug"), logging.StreamHandler, sys.stdout, logging.DEBUG - ) if os.environ.get("AVOCADO_LOG_EARLY"): - add_log_handler("avocado", logging.StreamHandler, sys.stdout, logging.DEBUG) - add_log_handler(LOG_JOB, logging.StreamHandler, sys.stdout, logging.DEBUG) + add_log_handler(LOG_ROOT, logging.StreamHandler, sys.stdout) + add_log_handler(LOG_JOB, logging.StreamHandler, sys.stdout) else: STD_OUTPUT.fake_outputs() - add_log_handler("avocado", MemStreamHandler, None, logging.DEBUG) - logging.getLogger("avocado").level = logging.DEBUG + add_log_handler(LOG_ROOT, MemStreamHandler, None) + LOG_ROOT.level = logging.DEBUG CONFIG = [] @@ -430,23 +426,62 @@ def del_last_configuration(): logger.addHandler(handler) +def split_loggers_and_levels(loggers): + """Separates logger names and logger levels. + + :param loggers: Logger names with or without levels + :type loggers: List of strings in format STREAM[:LEVEL][,STREAM[:LEVEL][,...]] + :yields: Logger name and level + :rtype: tuple(logger_name, logger_level) + """ + for stream_name in loggers: + stream_level = re.split(r"(? as well - as the last partial message. Do configure your logging to not to add - newline automatically. 
- :param data - Raw data (a string) that will be processed. - """ - # splitlines() discards a trailing blank line, so use split() instead - data_lines = data.split("\n") - if len(data_lines) > 1: # when not last line, contains \n - self._log_line(f"{data_lines[0]}\n") - for line in data_lines[1:-1]: - self._log_line(f"{line}\n") - if data_lines[-1]: # Last line does not contain \n - self._log_line(data_lines[-1]) - - def _log_line(self, line): - """ - Forwards line to all the expected loggers along with expected prefix - """ - for logger, prefix in zip(self._loggers, self._prefixes): - logger.log(self._level, prefix + line) - - def flush(self): - pass - - @staticmethod - def isatty(): - return False - - def add_logger(self, logger, prefix=""): - self._loggers.append(logger) - self._prefixes.append(prefix) - - def rm_logger(self, logger): - idx = self._loggers.index(logger) - self._loggers.remove(logger) - self._prefixes = self._prefixes[:idx] + self._prefixes[idx + 1 :] class Throbber: diff --git a/avocado/core/resolver.py b/avocado/core/resolver.py index 8dc1410e05..51a582950b 100644 --- a/avocado/core/resolver.py +++ b/avocado/core/resolver.py @@ -104,6 +104,8 @@ class Resolver(EnabledExtensionManager): resolver plugins and a resolution policy. """ + PLUGIN_DESCRIPTION = "Plugins that resolve test references (resolver)" + DEFAULT_POLICY = { ReferenceResolutionResult.SUCCESS: ReferenceResolutionAction.RETURN, ReferenceResolutionResult.NOTFOUND: ReferenceResolutionAction.CONTINUE, @@ -145,6 +147,8 @@ class Discoverer(EnabledExtensionManager): tests from different data according to active discoverer plugins. 
""" + PLUGIN_DESCRIPTION = "Plugins that discover tests without references (discoverer)" + def __init__(self, config=None): super().__init__("avocado.plugins.discoverer", invoke_kwds={"config": config}) @@ -276,7 +280,10 @@ def resolve(references, hint=None, ignore_missing=True, config=None): # resolution process missing = [_ for _ in missing if not os.path.isdir(_)] if missing: - msg = f"Could not resolve references: {','.join(missing)}" + msg = ( + f"No tests found for given test references: {', '.join(missing)}\n" + f"Try 'avocado -V list {' '.join(missing)}' for details" + ) raise JobTestSuiteReferenceResolutionError(msg) return resolutions diff --git a/avocado/core/safeloader/core.py b/avocado/core/safeloader/core.py index 6b254b459d..05c31344fc 100644 --- a/avocado/core/safeloader/core.py +++ b/avocado/core/safeloader/core.py @@ -53,12 +53,35 @@ def get_methods_info(statement_body, class_tags, class_dependencies): def _extend_test_list(current, new): for test in new: test_method_name = test[0] - if test_method_name not in [_[0] for _ in current]: + found = False + for current_test in current: + if test_method_name == current_test[0]: + _exted_tests_tags([current_test], test[1]) + found = True + break + if not found: current.append(test) +def _exted_tests_tags(tests, tags, force_update=False): + for test in tests: + for tag, value in tags.items(): + if force_update: + test[1][tag] = value + else: + test[1].setdefault(tag, value) + + def _examine_same_module( - parents, info, disabled, match, module, target_module, target_class, determine_match + parents, + info, + disabled, + match, + module, + target_module, + target_class, + determine_match, + info_class_tags, ): # Searching the parents in the same module for parent in parents[:]: @@ -72,7 +95,7 @@ def _examine_same_module( # From this point we use `_$variable` to name temporary returns # from method calls that are to-be-assigned/combined with the # existing `$variable`. 
- _info, _disable, _match = _examine_class( + _info, _disable, parent_tags, _match = _examine_class( target_module, target_class, determine_match, @@ -82,6 +105,8 @@ def _examine_same_module( ) if _info: parents.remove(parent) + _exted_tests_tags(info, parent_tags) + _exted_tests_tags(_info, info_class_tags, True) _extend_test_list(info, _info) disabled.update(_disable) if _match is not match: @@ -213,11 +238,13 @@ def _examine_class( :type match: bool :returns: tuple where first item is a list of test methods detected for given class; second item is set of class names which - look like avocado tests but are force-disabled. + look like avocado tests but are force-disabled; + third is dict of class tags. :rtype: tuple """ module = PythonModule(path, target_module, target_class) info = [] + class_tags = {} disabled = set() for klass in module.iter_classes(class_name): @@ -229,9 +256,10 @@ def _examine_class( if match is False: match = module.is_matching_klass(klass) + class_tags = get_docstring_directives_tags(docstring) info = get_methods_info( klass.body, - get_docstring_directives_tags(docstring), + class_tags, get_docstring_directives_dependencies(docstring), ) @@ -247,6 +275,7 @@ def _examine_class( target_module, target_class, determine_match, + class_tags, ) # If there are parents left to be discovered, they @@ -266,7 +295,7 @@ def _examine_class( except ClassNotSuitable: continue - _info, _disabled, _match = _examine_class( + _info, _disabled, parent_tags, _match = _examine_class( target_module, target_class, determine_match, @@ -275,6 +304,8 @@ def _examine_class( match, ) if _info: + _exted_tests_tags(info, parent_tags) + _exted_tests_tags(_info, class_tags, True) _extend_test_list(info, _info) disabled.update(_disabled) if _match is not match: @@ -285,7 +316,7 @@ def _examine_class( if imported_symbol: found_spec = imported_symbol.get_importable_spec() if found_spec: - _info, _disabled, _match = _examine_class( + _info, _disabled, _class_tags, _match = 
_examine_class( target_module, target_class, determine_match, @@ -294,12 +325,17 @@ def _examine_class( match, ) if _info: + _exted_tests_tags(info, _class_tags) + _exted_tests_tags(_info, class_tags, True) _extend_test_list(info, _info) + _class_tags.update(class_tags) + class_tags = _class_tags disabled.update(_disabled) + if _match is not match: match = _match - return info, disabled, match + return info, disabled, class_tags, match def find_python_tests(target_module, target_class, determine_match, path): @@ -364,9 +400,10 @@ def find_python_tests(target_module, target_class, determine_match, path): match = True else: match = module.is_matching_klass(klass) + class_tags = get_docstring_directives_tags(docstring) info = get_methods_info( klass.body, - get_docstring_directives_tags(docstring), + class_tags, get_docstring_directives_dependencies(docstring), ) # Getting the list of parents of the current class @@ -381,6 +418,7 @@ def find_python_tests(target_module, target_class, determine_match, path): target_module, target_class, determine_match, + class_tags, ) # If there are parents left to be discovered, they @@ -400,7 +438,7 @@ def find_python_tests(target_module, target_class, determine_match, path): except ClassNotSuitable: continue - _info, _dis, _match = _examine_class( + _info, _dis, parent_tags, _match = _examine_class( target_module, target_class, determine_match, @@ -409,7 +447,9 @@ def find_python_tests(target_module, target_class, determine_match, path): match, ) if _info: - info.extend(_info) + _exted_tests_tags(info, parent_tags) + _exted_tests_tags(_info, class_tags, True) + _extend_test_list(info, _info) disabled.update(_dis) if _match is not match: match = _match diff --git a/avocado/core/spawners/common.py b/avocado/core/spawners/common.py index 425f877aaa..24da448c5d 100644 --- a/avocado/core/spawners/common.py +++ b/avocado/core/spawners/common.py @@ -1,8 +1,6 @@ import enum -import os from avocado.core.settings import settings -from 
avocado.core.spawners.exceptions import SpawnerException class SpawnMethod(enum.Enum): @@ -32,8 +30,4 @@ def __init__(self, config=None, job=None): self._job = job def task_output_dir(self, runtime_task): - if self._job is None: - raise SpawnerException("Job wasn't set properly") - return os.path.join( - self._job.test_results_path, runtime_task.task.identifier.str_filesystem - ) + return runtime_task.task.runnable.output_dir diff --git a/avocado/core/streams.py b/avocado/core/streams.py index 3bcc1c9681..36c9cc3bc2 100644 --- a/avocado/core/streams.py +++ b/avocado/core/streams.py @@ -1,8 +1,15 @@ #: Builtin special keywords to enable set of logging streams BUILTIN_STREAMS = { + "app": "avocado.app", + "test": "avocado.test", + "job": "avocado.job", + "early": "avocado", +} + +BUILTIN_STREAMS_DESCRIPTION = { "app": "application output", "test": "test output", - "debug": "tracebacks and other debugging info", + "job": "job output", "early": ("early logging of other streams, including test " "(very verbose)"), } diff --git a/avocado/core/task/runtime.py b/avocado/core/task/runtime.py index e278cab3b5..1547d08b50 100644 --- a/avocado/core/task/runtime.py +++ b/avocado/core/task/runtime.py @@ -1,3 +1,5 @@ +import itertools +import os from enum import Enum from avocado.core.dispatcher import TestPostDispatcher, TestPreDispatcher @@ -35,6 +37,7 @@ def from_runnable( runnable, no_digits, index, + base_dir, test_suite_name=None, status_server_uri=None, job_id=None, @@ -48,6 +51,8 @@ def from_runnable( :type no_digits: int :param index: index of tests inside test suite :type index: int + :param base_dir: Path to the job base directory. 
+ :type base_dir: str :param test_suite_name: test suite name which this test is related to :type test_suite_name: str :param status_server_uri: the URIs for the status servers that this @@ -75,6 +80,8 @@ def from_runnable( test_id = TestID(prefix, name, runnable.variant, no_digits) + if not runnable.output_dir: + runnable.output_dir = os.path.join(base_dir, test_id.str_filesystem) # handles the test task task = Task( runnable, @@ -113,7 +120,7 @@ def __init__(self, task, satisfiable_deps_execution_statuses=None): #: :class:`avocado.core.task.runtime.RuntimeTaskStatus` self.status = None #: Information about task result when it is finished - self.result = None + self._result = None #: Timeout limit for the completion of the task execution self.execution_timeout = None #: A handle that may be set by a spawner, and that may be @@ -124,9 +131,11 @@ def __init__(self, task, satisfiable_deps_execution_statuses=None): #: The result of the spawning of a Task self.spawning_result = None self.dependencies = [] - self.satisfiable_deps_execution_statuses = ( - satisfiable_deps_execution_statuses or ["pass"] - ) + self._satisfiable_deps_execution_statuses = ["pass"] + if satisfiable_deps_execution_statuses: + self._satisfiable_deps_execution_statuses = [ + status.lower() for status in satisfiable_deps_execution_statuses + ] #: Flag to detect if the task should be save to cache self.is_cacheable = False @@ -147,6 +156,18 @@ def __eq__(self, other): return hash(self) == hash(other) return False + @property + def result(self): + return self._result + + @property + def satisfiable_deps_execution_statuses(self): + return self._satisfiable_deps_execution_statuses + + @result.setter + def result(self, result): + self._result = result.lower() + def are_dependencies_finished(self): for dependency in self.dependencies: if dependency.status not in RuntimeTaskStatus.finished_statuses(): @@ -179,6 +200,7 @@ def get_tasks_from_test_task( cls, test_task, no_digits, + base_dir, 
test_suite_name=None, status_server_uri=None, job_id=None, @@ -190,6 +212,8 @@ def get_tasks_from_test_task( :type test_task: :class:`avocado.core.task.runtime.RuntimeTask` :param no_digits: number of digits of the test uid :type no_digits: int + :param base_dir: Path to the job base directory. + :type base_dir: str :param test_suite_name: test suite name which this test is related to :type test_suite_name: str :param status_server_uri: the URIs for the status servers that this @@ -217,15 +241,28 @@ def get_tasks_from_test_task( satisfiable_deps_execution_statuses = None if isinstance(runnable, tuple): runnable, satisfiable_deps_execution_statuses = runnable + output_dir_not_exists = runnable.output_dir is None task = cls.from_runnable( runnable, no_digits, prefix, + base_dir, test_suite_name, status_server_uri, job_id, satisfiable_deps_execution_statuses, ) + if output_dir_not_exists: + runnable.output_dir = os.path.join( + os.path.abspath(os.path.join(base_dir, os.pardir)), + "dependencies", + str(task.task.identifier), + ) + task.task.metadata["symlink"] = os.path.join( + test_task.task.runnable.output_dir, + "dependencies", + f'{runnable.kind}-{runnable.kwargs.get("name")}', + ) task.is_cacheable = is_cacheable tasks.append(task) return tasks @@ -249,7 +286,13 @@ class RuntimeTaskGraph: """Graph representing dependencies between runtime tasks.""" def __init__( - self, tests, test_suite_name, status_server_uri, job_id, suite_config=None + self, + tests, + test_suite_name, + status_server_uri, + job_id, + base_dir, + suite_config=None, ): """Instantiates a new RuntimeTaskGraph. @@ -267,6 +310,8 @@ def __init__( sent to the destination job's status server and will make into the job's results. :type job_id: str + :param base_dir: Path to the job base directory. + :type base_dir: str :param suite_config: Configuration dict relevant for the whole suite. 
:type suite_config: dict """ @@ -278,6 +323,7 @@ def __init__( runnable, no_digits, index, + base_dir, test_suite_name, status_server_uri, job_id, @@ -286,28 +332,31 @@ def __init__( # with --dry-run we don't want to run dependencies if runnable.kind != "dry-run": - tasks = PreRuntimeTask.get_tasks_from_test_task( + pre_tasks = PreRuntimeTask.get_tasks_from_test_task( runtime_test, no_digits, + base_dir, test_suite_name, status_server_uri, job_id, suite_config, ) - tasks.append(runtime_test) - tasks = tasks + PostRuntimeTask.get_tasks_from_test_task( + post_tasks = PostRuntimeTask.get_tasks_from_test_task( runtime_test, no_digits, + base_dir, test_suite_name, status_server_uri, job_id, suite_config, ) - if tasks: - self._connect_tasks(tasks) + if pre_tasks or post_tasks: + self._connect_tasks(pre_tasks, [runtime_test], post_tasks) - def _connect_tasks(self, tasks): - for dependency, task in zip(tasks, tasks[1:]): + def _connect_tasks(self, pre_tasks, tasks, post_tasks): + connections = list(itertools.product(pre_tasks, tasks)) + connections += list(itertools.product(tasks, post_tasks)) + for dependency, task in connections: self.graph[task] = task self.graph[dependency] = dependency task.dependencies.append(dependency) diff --git a/avocado/core/task/statemachine.py b/avocado/core/task/statemachine.py index e3d887380b..08b74d0b01 100644 --- a/avocado/core/task/statemachine.py +++ b/avocado/core/task/statemachine.py @@ -4,9 +4,10 @@ import multiprocessing import time -from avocado.core.exceptions import TestFailFast +from avocado.core.exceptions import JobFailFast from avocado.core.task.runtime import RuntimeTaskStatus from avocado.core.teststatus import STATUSES_NOT_OK +from avocado.core.utils import messages LOG = logging.getLogger(__name__) @@ -24,6 +25,7 @@ def __init__(self, tasks, status_repo): self._finished = [] self._lock = asyncio.Lock() self._cache_lock = asyncio.Lock() + self._task_size = len(tasks) self._tasks_by_id = { str(runtime_task.task.identifier): 
runtime_task.task @@ -62,6 +64,10 @@ def lock(self): def cache_lock(self): return self._cache_lock + @property + def task_size(self): + return self._task_size + @property async def complete(self): async with self._lock: @@ -176,36 +182,36 @@ def __repr__(self): ) async def _send_finished_tasks_message(self, terminate_tasks, reason): - """Sends messages related to timeout to status repository. - When the task is terminated, it is necessary to send a finish message to status - repository to close logging. This method will send log message with timeout - information and finish message with right fail reason. + """Sends messages related to tasks being terminated to status repository. + + On normal conditions, the "avocado-runner-*" will produce messages + finishing each task. But, under some conditions (such as timeouts, + interruptions requested by users, etc), it's necessary to do this on + the runner's behalf. + + When a task is terminated, it is necessary to send a "finish" message + with the correct fail reason to the status repository, which will close + logging. This method will also send a "log" message with the reason + (timeout, user interruption, etc). - :param terminate_tasks: runtime_tasks which were terminated + :param terminate_tasks: runtime_tasks which were terminated and need + to have messages sent on their behalf :type terminate_tasks: list + :param reason: a description of what caused the task interruption (timeout, user + requested interruption, etc). 
+ :type reason: str """ for terminated_task in terminate_tasks: task_id = str(terminated_task.task.identifier) job_id = terminated_task.task.job_id - encoding = "utf-8" - log_message = { - "status": "running", - "type": "log", - "log": f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())} | " - f"Runner error occurred: {reason}".encode(encoding), - "encoding": encoding, - "time": time.monotonic(), - "id": task_id, - "job_id": job_id, - } - finish_message = { - "status": "finished", - "result": "interrupted", - "fail_reason": f"Test interrupted: {reason}", - "time": time.monotonic(), - "id": task_id, - "job_id": job_id, - } + log_message = messages.LogMessage.get( + f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())} | Runner error occurred: {reason}", + id=task_id, + job_id=job_id, + ) + finish_message = messages.FinishedMessage.get( + "interrupted", f"Test interrupted: {reason}", id=task_id, job_id=job_id + ) try: current_status, _ = self._state_machine._status_repo._status[task_id] except KeyError: @@ -271,6 +277,25 @@ async def triage(self): LOG.debug( 'Task "%s" has failed dependencies', runtime_task.task.identifier ) + task_id = str(runtime_task.task.identifier) + job_id = runtime_task.task.job_id + reason = "Dependency was not fulfilled." 
+ start_message = messages.StartedMessage.get( + output_dir=runtime_task.task.runnable.output_dir, + id=task_id, + job_id=job_id, + ) + log_message = messages.LogMessage.get( + f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())} | {reason}", + id=task_id, + job_id=job_id, + ) + finish_message = messages.FinishedMessage.get( + "skip", reason, id=task_id, job_id=job_id + ) + self._state_machine._status_repo.process_message(start_message) + self._state_machine._status_repo.process_message(log_message) + self._state_machine._status_repo.process_message(finish_message) runtime_task.result = "fail" await self._state_machine.finish_task( runtime_task, RuntimeTaskStatus.FAIL_TRIAGE @@ -291,6 +316,25 @@ async def triage(self): return if is_task_in_cache: + task_id = str(runtime_task.task.identifier) + job_id = runtime_task.task.job_id + start_message = messages.StartedMessage.get( + output_dir=runtime_task.task.runnable.output_dir, + id=task_id, + job_id=job_id, + ) + log_message = messages.LogMessage.get( + f"{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())} | " + f"Dependency fulfilled from cache.", + id=task_id, + job_id=job_id, + ) + finish_message = messages.FinishedMessage.get( + "pass", id=task_id, job_id=job_id + ) + self._state_machine._status_repo.process_message(start_message) + self._state_machine._status_repo.process_message(log_message) + self._state_machine._status_repo.process_message(finish_message) await self._state_machine.finish_task( runtime_task, RuntimeTaskStatus.IN_CACHE ) @@ -399,7 +443,7 @@ async def monitor(self): ) if self._failfast and not result_stats.isdisjoint(STATUSES_NOT_OK): await self._state_machine.abort(RuntimeTaskStatus.FAILFAST) - raise TestFailFast("Interrupting job (failfast).") + raise JobFailFast("Interrupting job (failfast).") await self._state_machine.finish_task(runtime_task, RuntimeTaskStatus.FINISHED) @@ -411,15 +455,17 @@ async def _terminate_tasks(self, task_status): await self._state_machine.abort(task_status) 
terminated = [] while True: - is_complete = await self._state_machine.complete async with self._state_machine.lock: try: runtime_task = self._state_machine.monitored.pop(0) + await self._terminate_task(runtime_task, task_status) + terminated.append(runtime_task) except IndexError: - if is_complete: + if ( + len(self._state_machine.finished) + len(terminated) + == self._state_machine.task_size + ): break - await self._terminate_task(runtime_task, task_status) - terminated.append(runtime_task) return terminated async def terminate_tasks_timeout(self): diff --git a/avocado/core/test.py b/avocado/core/test.py index dacc980048..9853c38570 100644 --- a/avocado/core/test.py +++ b/avocado/core/test.py @@ -21,6 +21,7 @@ import asyncio import functools import inspect +import logging import os import shutil import sys @@ -30,7 +31,6 @@ import warnings from avocado.core import exceptions, parameters -from avocado.core.output import LOG_JOB from avocado.core.settings import settings from avocado.core.test_id import TestID from avocado.core.teststatus import STATUSES_NOT_OK @@ -305,7 +305,7 @@ def record_and_warn(*args, **kwargs): self.__outputdir = utils_path.init_dir(self.logdir, "data") - self.__log = LOG_JOB + self.__log = logging.getLogger("avocado.test") original_log_warn = self.log.warning self.__log_warn_used = False self.log.warn = self.log.warning = record_and_warn @@ -607,13 +607,13 @@ def _run_test(self): self.setUp() except exceptions.TestSkipError as details: self.__skip_test = True - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) raise exceptions.TestSkipError(details) except exceptions.TestCancel: - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) raise except: # Old-style exceptions are not inherited from Exception() - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), 
logger=self.log) details = sys.exc_info()[1] raise exceptions.TestSetupFail(details) else: @@ -625,10 +625,10 @@ def _run_test(self): else: testMethod() except exceptions.TestCancel: - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) raise except: # Old-style exceptions are not inherited from Exception() pylint: disable=W0702 - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) details = sys.exc_info()[1] if not isinstance(details, Exception): # Avoid passing nasty exc details = exceptions.TestError(f"{details!r}: {details}") @@ -649,7 +649,7 @@ def _tearDown(self): self.__phase = "TEARDOWN" self.tearDown() except exceptions.TestSkipError as details: - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) skip_illegal_msg = ( f"Using skip decorators in tearDown() " f"is not allowed in " @@ -658,10 +658,10 @@ def _tearDown(self): ) raise exceptions.TestError(skip_illegal_msg) except exceptions.TestCancel: - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) raise except: # avoid old-style exception failures pylint: disable=W0702 - stacktrace.log_exc_info(sys.exc_info(), logger=LOG_JOB) + stacktrace.log_exc_info(sys.exc_info(), logger=self.log) details = sys.exc_info()[1] raise exceptions.TestSetupFail(details) diff --git a/avocado/core/utils/loader.py b/avocado/core/utils/loader.py index d95bc6c0fd..965a7bdbfc 100644 --- a/avocado/core/utils/loader.py +++ b/avocado/core/utils/loader.py @@ -4,21 +4,6 @@ import sys from avocado.core import test -from avocado.utils import stacktrace - - -class TestError(test.Test): - """ - Generic test error. 
- """ - - def __init__(self, *args, **kwargs): - exception = kwargs.pop("exception") - test.Test.__init__(self, *args, **kwargs) - self.exception = exception - - def test(self): - self.error(self.exception) def load_test(test_factory): @@ -32,36 +17,30 @@ def load_test(test_factory): test_class, test_parameters = test_factory if "run.results_dir" in test_parameters: test_parameters["base_logdir"] = test_parameters.pop("run.results_dir") - if "modulePath" in test_parameters: - test_path = test_parameters.pop("modulePath") - else: - test_path = None - if isinstance(test_class, str): - module_name = os.path.basename(test_path).split(".")[0] - test_module_dir = os.path.abspath(os.path.dirname(test_path)) + if "modulePath" not in test_parameters: + raise RuntimeError( + 'Test factory parameters is missing the module\'s path ("modulePath")' + ) + + test_path = test_parameters.pop("modulePath") + module_name = os.path.basename(test_path).split(".")[0] + test_module_dir = os.path.abspath(os.path.dirname(test_path)) + spec = importlib.util.spec_from_file_location(module_name, test_path) + test_module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = test_module + try: # Tests with local dir imports need this - try: - sys.path.insert(0, test_module_dir) - test_module = importlib.import_module(module_name) - except: # pylint: disable=W0702 - # On load_module exception we fake the test class and pass - # the exc_info as parameter to be logged. 
- test_parameters["methodName"] = "test" - exception = stacktrace.prepare_exc_info(sys.exc_info()) - test_parameters["exception"] = exception - return TestError(**test_parameters) - finally: - if test_module_dir in sys.path: - sys.path.remove(test_module_dir) - for _, obj in inspect.getmembers(test_module): - if ( - inspect.isclass(obj) - and obj.__name__ == test_class - and inspect.getmodule(obj) == test_module - ): - if issubclass(obj, test.Test): - test_class = obj - break - test_instance = test_class(**test_parameters) - - return test_instance + sys.path.insert(0, test_module_dir) + spec.loader.exec_module(test_module) + finally: + if test_module_dir in sys.path: + sys.path.remove(test_module_dir) + for _, obj in inspect.getmembers(test_module): + if ( + inspect.isclass(obj) + and obj.__name__ == test_class + and inspect.getmodule(obj) == test_module + and issubclass(obj, test.Test) + ): + return obj(**test_parameters) + raise ImportError(f'Failed to find/load class "{test_class}" in "{test_path}"') diff --git a/avocado/core/utils/messages.py b/avocado/core/utils/messages.py index f01749a95b..d3f6c1542a 100644 --- a/avocado/core/utils/messages.py +++ b/avocado/core/utils/messages.py @@ -1,9 +1,9 @@ +import gc import logging import sys import time -from avocado.core import output -from avocado.core.streams import BUILTIN_STREAMS +from avocado.core.output import split_loggers_and_levels class GenericMessage: @@ -60,6 +60,7 @@ def get( class_name=None, fail_class=None, traceback=None, + **kwargs, ): # pylint: disable=W0221 """Creates finished message with all necessary information. 
@@ -79,14 +80,13 @@ def get( :return: finished message :rtype: dict """ - return super().get( - result=result, - fail_reason=fail_reason, - returncode=returncode, - class_name=class_name, - fail_class=fail_class, - traceback=traceback, - ) + kwargs["result"] = result + kwargs["fail_reason"] = fail_reason + kwargs["returncode"] = returncode + kwargs["class_name"] = class_name + kwargs["fail_class"] = fail_class + kwargs["traceback"] = traceback + return super().get(**kwargs) class GenericRunningMessage(GenericMessage): @@ -207,7 +207,9 @@ def emit(self, record): kwargs.update(**self.kwargs) else: kwargs = self.kwargs + gc.disable() self.queue.put(self.message.get(msg, **kwargs)) + gc.enable() class StreamToQueue: @@ -224,7 +226,9 @@ def __init__(self, queue, message_type): self.message = _supported_types[message_type] def write(self, buf): + gc.disable() self.queue.put(self.message.get(buf)) + gc.enable() def flush(self): pass @@ -243,50 +247,34 @@ def start_logging(config, queue): :type queue: multiprocessing.SimpleQueue """ - def split_loggers_and_levels(enabled_loggers, default_level): - for logger_level_split in map(lambda x: x.split(":"), enabled_loggers): - logger_name, *level = logger_level_split - yield logger_name, level[0] if len(level) > 0 else default_level - log_level = config.get("job.output.loglevel", logging.DEBUG) log_handler = RunnerLogHandler(queue, "log") fmt = "%(asctime)s %(name)s %(levelname)-5.5s| %(message)s" formatter = logging.Formatter(fmt=fmt) log_handler.setFormatter(formatter) - # main log = 'avocado' - logger = logging.getLogger("avocado") + # root log + logger = logging.getLogger("") logger.addHandler(log_handler) - logger.setLevel(log_level) - logger.propagate = False + logger.setLevel(logging.NOTSET) - # LOG_JOB = 'avocado.test' - log = output.LOG_JOB - log.addHandler(log_handler) - log.setLevel(log_level) - log.propagate = False + # main log = 'avocado' + logging.getLogger("avocado").setLevel(log_level) + + # 'avocado.test' + 
logging.getLogger("avocado.test").setLevel(log_level) sys.stdout = StreamToQueue(queue, "stdout") sys.stderr = StreamToQueue(queue, "stderr") - # output custom test loggers - enabled_loggers = config.get("core.show") - output_handler = RunnerLogHandler(queue, "output") - output_handler.setFormatter(logging.Formatter(fmt="%(name)s: %(message)s")) - user_streams = [ - user_streams - for user_streams in enabled_loggers - if user_streams not in BUILTIN_STREAMS - ] - for user_stream, level in split_loggers_and_levels(user_streams, log_level): - custom_logger = logging.getLogger(user_stream) - custom_logger.addHandler(output_handler) - custom_logger.setLevel(level) - # store custom test loggers enabled_loggers = config.get("job.run.store_logging_stream") - for enabled_logger, level in split_loggers_and_levels(enabled_loggers, log_level): - store_stream_handler = RunnerLogHandler(queue, "file", {"path": enabled_logger}) + for enabled_logger, level in split_loggers_and_levels(enabled_loggers): + log_path = f"{enabled_logger}.{logging.getLevelName(level)}.log" + if not level: + level = log_level + log_path = f"{enabled_logger}.log" + store_stream_handler = RunnerLogHandler(queue, "file", {"path": log_path}) store_stream_handler.setFormatter(formatter) output_logger = logging.getLogger(enabled_logger) output_logger.addHandler(store_stream_handler) diff --git a/avocado/plugins/assets.py b/avocado/plugins/assets.py index 9baf566344..b3d3025688 100644 --- a/avocado/plugins/assets.py +++ b/avocado/plugins/assets.py @@ -39,7 +39,7 @@ class FetchAssetHandler(ast.NodeVisitor): # pylint: disable=R0902 PATTERN = "fetch_asset" - def __init__(self, file_name, klass=None, method=None): + def __init__(self, file_name, test_file_parse_cache, klass=None, method=None): self.file_name = file_name # fetch assets from specific test using klass and method self.klass = klass @@ -57,7 +57,11 @@ def __init__(self, file_name, klass=None, method=None): # check if we have valid instrumented tests # 
discards disabled tests - self.tests = safeloader.find_avocado_tests(self.file_name)[0] + if file_name not in test_file_parse_cache: + test_file_parse_cache[file_name] = safeloader.find_avocado_tests( + self.file_name + )[0] + self.tests = test_file_parse_cache[file_name] # create Abstract Syntax Tree from test source file with open(self.file_name, encoding="utf-8") as source_file: @@ -185,8 +189,11 @@ def visit_Assign(self, node): # pylint: disable=C0103 # otherwise, save the local variable name if isinstance(node.targets[0], ast.Attribute): name = node.targets[0].attr - else: + elif hasattr(node.targets[0], "id"): name = node.targets[0].id + else: + self.generic_visit(node) + return if isinstance(node.value, ast.Str): self.asgmts[cur_klass][cur_method][name] = node.value.s @@ -212,7 +219,9 @@ def visit_Call(self, node): # pylint: disable=C0103 self.calls.append(call) -def fetch_assets(test_file, klass=None, method=None, logger=None): +def fetch_assets( + test_file, test_file_parse_cache, klass=None, method=None, logger=None +): """Fetches the assets based on keywords listed on FetchAssetHandler.calls. 
:param test_file: File name of instrumented test to be evaluated @@ -224,7 +233,7 @@ def fetch_assets(test_file, klass=None, method=None, logger=None): timeout = settings.as_dict().get("assets.fetch.timeout") success = [] fail = [] - handler = FetchAssetHandler(test_file, klass, method) + handler = FetchAssetHandler(test_file, test_file_parse_cache, klass, method) for call in handler.calls: expire = call.pop("expire", None) if expire is not None: @@ -275,8 +284,11 @@ def pre_tests(self, job): if candidate not in candidates: candidates.append(candidate) + test_file_parse_cache = {} for candidate in candidates: - fetch_assets(*candidate, logger) + fetch_assets( + candidate[0], test_file_parse_cache, candidate[1], candidate[2], logger + ) class Assets(CLICmd): @@ -530,10 +542,11 @@ def handle_list(self, config): def handle_fetch(config): exitcode = exit_codes.AVOCADO_ALL_OK # fetch assets from instrumented tests + cache = {} for test_file in config.get("assets.fetch.references"): if os.path.isfile(test_file) and test_file.endswith(".py"): LOG_UI.debug("Fetching assets from %s.", test_file) - success, fail = fetch_assets(test_file) + success, fail = fetch_assets(test_file, cache) for asset_file in success: LOG_UI.debug(" File %s fetched or already on cache.", asset_file) diff --git a/avocado/plugins/diff.py b/avocado/plugins/diff.py index d68e3c0c3c..90733b39b5 100644 --- a/avocado/plugins/diff.py +++ b/avocado/plugins/diff.py @@ -311,14 +311,14 @@ def _get_name_no_id(test): setsid = getattr(os, "setsid", None) if not setsid: setsid = getattr(os, "setpgrp", None) - with open(os.devnull, "r+", encoding="utf-8") as inout: + with open(os.devnull, "r+", encoding="utf-8") as in_out: cmd = ["xdg-open", html_file] subprocess.Popen( # pylint: disable=W1509 cmd, close_fds=True, - stdin=inout, - stdout=inout, - stderr=inout, + stdin=in_out, + stdout=in_out, + stderr=in_out, preexec_fn=setsid, ) diff --git a/avocado/plugins/plugins.py b/avocado/plugins/plugins.py index 
783f9d403d..e55dd2fc40 100644 --- a/avocado/plugins/plugins.py +++ b/avocado/plugins/plugins.py @@ -14,10 +14,11 @@ """ Plugins information plugin """ +import itertools + from avocado.core import dispatcher from avocado.core.output import LOG_UI from avocado.core.plugin_interfaces import CLICmd -from avocado.core.resolver import Resolver from avocado.core.settings import settings from avocado.utils import astring @@ -47,62 +48,18 @@ def configure(self, parser): ) def run(self, config): - plugin_types = [ - ( - dispatcher.InitDispatcher(), - "Plugins that always need to be initialized (init): ", - ), - (dispatcher.CLICmdDispatcher(), "Plugins that add new commands (cli.cmd):"), - ( - dispatcher.CLIDispatcher(), - "Plugins that add new options to commands (cli):", - ), - ( - dispatcher.JobPrePostDispatcher(), - "Plugins that run before/after the execution of jobs (job.prepost):", - ), - ( - dispatcher.TestPreDispatcher(), - "Plugins that run before the execution of each test (test.pre):", - ), - ( - dispatcher.TestPostDispatcher(), - "Plugins that run after the execution of each test (test.post):", - ), - ( - dispatcher.ResultDispatcher(), - "Plugins that generate job result in different formats (result):", - ), - ( - dispatcher.ResultEventsDispatcher(config), - ( - "Plugins that generate job result based on job/test events " - "(result_events):" - ), - ), - ( - dispatcher.VarianterDispatcher(), - "Plugins that generate test variants (varianter): ", - ), - (Resolver(), "Plugins that resolve test references (resolver): "), - ( - dispatcher.SuiteRunnerDispatcher(), - "Plugins that run test suites on a job (suite.runner): ", - ), - ( - dispatcher.SpawnerDispatcher(), - "Plugins that spawn tasks and know about their status (spawner): ", - ), - ( - dispatcher.RunnableRunnerDispatcher(), - "Plugins that run runnables (under a task and spawner) (runnable.runner): ", - ), - ( - dispatcher.CacheDispatcher(), - "Plugins that manipulates with avocado cache: ", - ), - ] - for 
plugins_active, msg in plugin_types: + for plugin_dispatcher, config_needed, job_needed in itertools.chain( + dispatcher.get_dispatchers("avocado.core.dispatcher"), + dispatcher.get_dispatchers("avocado.core.resolver"), + ): + if not config_needed: + plugins_active = plugin_dispatcher() + elif config_needed and not job_needed: + plugins_active = plugin_dispatcher(config) + else: + plugins_active = plugin_dispatcher(config, None) + msg = f"{plugins_active.PLUGIN_DESCRIPTION}:" + LOG_UI.info(msg) plugin_matrix = [] if config.get("plugins.ordered_list"): @@ -117,4 +74,4 @@ def run(self, config): else: for line in astring.iter_tabular_output(plugin_matrix): LOG_UI.debug(line) - LOG_UI.debug("") + LOG_UI.debug("") diff --git a/avocado/plugins/runner_nrunner.py b/avocado/plugins/runner_nrunner.py index 379f1af8b7..b0ad4d7778 100644 --- a/avocado/plugins/runner_nrunner.py +++ b/avocado/plugins/runner_nrunner.py @@ -24,7 +24,7 @@ import tempfile from avocado.core.dispatcher import SpawnerDispatcher -from avocado.core.exceptions import JobError, TestFailFast +from avocado.core.exceptions import JobError, JobFailFast from avocado.core.messages import MessageHandler from avocado.core.nrunner.runnable import Runnable from avocado.core.nrunner.runner import check_runnables_runner_requirements @@ -286,6 +286,7 @@ def run_suite(self, job, test_suite): test_suite.name, self._determine_status_server(test_suite, "run.status_server_uri"), job.unique_id, + job.test_results_path, test_suite.config, ) # pylint: disable=W0201 @@ -354,7 +355,7 @@ def run_suite(self, job, test_suite): ) ) raise - except (KeyboardInterrupt, asyncio.TimeoutError, TestFailFast) as ex: + except (KeyboardInterrupt, asyncio.TimeoutError, JobFailFast) as ex: LOG_JOB.info(str(ex)) job.interrupted_reason = str(ex) summary.add("INTERRUPTED") diff --git a/avocado/plugins/runners/asset.py b/avocado/plugins/runners/asset.py index c971c1fa05..8cb70afdc0 100644 --- a/avocado/plugins/runners/asset.py +++ 
b/avocado/plugins/runners/asset.py @@ -1,5 +1,6 @@ +import sys import time -from multiprocessing import Process, SimpleQueue +from multiprocessing import Process, SimpleQueue, set_start_method from avocado.core.nrunner.app import BaseRunnerApp from avocado.core.nrunner.runner import RUNNER_RUN_STATUS_INTERVAL, BaseRunner @@ -116,6 +117,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/avocado_instrumented.py b/avocado/plugins/runners/avocado_instrumented.py index 97b2a93f8b..593710a910 100644 --- a/avocado/plugins/runners/avocado_instrumented.py +++ b/avocado/plugins/runners/avocado_instrumented.py @@ -1,5 +1,6 @@ import multiprocessing import os +import sys import tempfile import time import traceback @@ -145,21 +146,13 @@ def run(self, runnable): time_started = time.monotonic() timeout = float(self.DEFAULT_TIMEOUT) - most_current_execution_state_time = None + next_status_time = None while True: time.sleep(RUNNER_RUN_CHECK_INTERVAL) now = time.monotonic() if queue.empty(): - if most_current_execution_state_time is not None: - next_execution_state_mark = ( - most_current_execution_state_time - + RUNNER_RUN_STATUS_INTERVAL - ) - if ( - most_current_execution_state_time is None - or now > next_execution_state_mark - ): - most_current_execution_state_time = now + if next_status_time is None or now > next_status_time: + next_status_time = now + RUNNER_RUN_STATUS_INTERVAL yield messages.RunningMessage.get() if (now - time_started) > timeout: process.terminate() @@ -190,6 +183,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/exec_test.py b/avocado/plugins/runners/exec_test.py index c7d37536e4..8eaf907e7f 100644 --- a/avocado/plugins/runners/exec_test.py +++ b/avocado/plugins/runners/exec_test.py @@ -1,6 
+1,8 @@ +import multiprocessing import os import shutil import subprocess +import sys import tempfile import pkg_resources @@ -96,9 +98,14 @@ def _get_env_variables(self, runnable): avocado_test_env_variables = { "AVOCADO_VERSION": self._get_avocado_version(), "AVOCADO_TEST_WORKDIR": workdir, + "AVOCADO_TEST_BASEDIR": os.path.dirname(os.path.abspath(runnable.uri)), } if runnable.output_dir: + avocado_test_env_variables["AVOCADO_TEST_LOGDIR"] = runnable.output_dir avocado_test_env_variables["AVOCADO_TEST_OUTPUTDIR"] = runnable.output_dir + avocado_test_env_variables["AVOCADO_TEST_LOGFILE"] = os.path.join( + runnable.output_dir, "debug.log" + ) return avocado_test_env_variables @staticmethod @@ -205,6 +212,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/package.py b/avocado/plugins/runners/package.py index ced0cc3f21..e2f5a32f08 100644 --- a/avocado/plugins/runners/package.py +++ b/avocado/plugins/runners/package.py @@ -1,5 +1,6 @@ +import sys import time -from multiprocessing import Process, SimpleQueue +from multiprocessing import Process, SimpleQueue, set_start_method from avocado.core.nrunner.app import BaseRunnerApp from avocado.core.nrunner.runner import RUNNER_RUN_STATUS_INTERVAL, BaseRunner @@ -90,9 +91,10 @@ def _run_software_manager(self, cmd, package, queue): output = { "result": "error", "stdout": "", - "stderr": ("Package manager not supported or not" " available."), + "stderr": ("Package manager not supported or not available."), } queue.put(output) + return if cmd == "install": result, stdout, stderr = self._install(software_manager, cmd, package) @@ -161,6 +163,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/podman_image.py b/avocado/plugins/runners/podman_image.py index 
70b1ca73b1..0b9a5dd5c7 100644 --- a/avocado/plugins/runners/podman_image.py +++ b/avocado/plugins/runners/podman_image.py @@ -1,7 +1,8 @@ import asyncio import logging +import sys import time -from multiprocessing import Process, SimpleQueue +from multiprocessing import Process, SimpleQueue, set_start_method from avocado.core.nrunner.app import BaseRunnerApp from avocado.core.nrunner.runner import RUNNER_RUN_STATUS_INTERVAL, BaseRunner @@ -68,6 +69,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/python_unittest.py b/avocado/plugins/runners/python_unittest.py index f902a2bf8c..c9a717efbe 100644 --- a/avocado/plugins/runners/python_unittest.py +++ b/avocado/plugins/runners/python_unittest.py @@ -192,6 +192,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/sysinfo.py b/avocado/plugins/runners/sysinfo.py index 8dc1458131..8cfc06bcb8 100644 --- a/avocado/plugins/runners/sysinfo.py +++ b/avocado/plugins/runners/sysinfo.py @@ -1,5 +1,6 @@ import multiprocessing import os +import sys import time import traceback @@ -204,6 +205,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") app = RunnerApp(print) app.run() diff --git a/avocado/plugins/runners/tap.py b/avocado/plugins/runners/tap.py index d6822c6582..28001c7f40 100644 --- a/avocado/plugins/runners/tap.py +++ b/avocado/plugins/runners/tap.py @@ -1,4 +1,6 @@ import io +import multiprocessing +import sys from avocado.core.nrunner.app import BaseRunnerApp from avocado.core.tapparser import TapParser, TestResult @@ -74,6 +76,8 @@ class RunnerApp(BaseRunnerApp): def main(): + if sys.platform == "darwin": + multiprocessing.set_start_method("fork") app = RunnerApp(print) app.run() diff --git 
a/avocado/plugins/spawners/lxc.py b/avocado/plugins/spawners/lxc.py new file mode 100644 index 0000000000..bea95f44f9 --- /dev/null +++ b/avocado/plugins/spawners/lxc.py @@ -0,0 +1,311 @@ +import asyncio +import contextlib +import logging +import os +import tempfile + +try: + import lxc + + LXC_AVAILABLE = True +except ImportError: + lxc = None + LXC_AVAILABLE = False + +from avocado.core.plugin_interfaces import Init, Spawner +from avocado.core.settings import settings +from avocado.core.spawners.common import SpawnerMixin, SpawnMethod + +LOG = logging.getLogger(__name__) + + +class LXCSpawnerException(Exception): + """Errors more closely related to the spawner functionality""" + + +class LXCStreamsFile: + def __init__(self): + self.fd = None + self.path = None + + def fileno(self): + return self.fd + + def read(self): + with open(self.path, "r", encoding="utf-8") as fp: + return fp.read() + + def __enter__(self): + self.fd, self.path = tempfile.mkstemp() + return self + + def __exit__(self, *args): + os.remove(self.path) + + +class LXCSpawnerInit(Init): + + description = "LXC (container) based spawner initialization" + + def initialize(self): + section = "spawner.lxc" + + help_msg = "List of already available container slots to spawn in" + settings.register_option( + section=section, key="slots", help_msg=help_msg, key_type=list, default=[] + ) + + help_msg = "Distribution for the LXC container" + settings.register_option( + section=section, key="dist", help_msg=help_msg, default="fedora" + ) + + help_msg = "Release of the LXC container (depends on the choice of distro)" + settings.register_option( + section=section, key="release", help_msg=help_msg, default="32" + ) + + help_msg = "Architecture of the LXC container" + settings.register_option( + section=section, key="arch", help_msg=help_msg, default="i386" + ) + + help_msg = ( + "Container creation hook command to customize optional new containers" + ) + settings.register_option( + section=section, 
key="create_hook", help_msg=help_msg, default="" + ) + + +def with_slot_reservation(fn): + """ + Decorator for slot cache context manager. + + :param fn: function to run with slot reservation + :type fn: function + :returns: same function with the slot now reserved + :rtype: function + + The main reason for the decorator is to not have to indent the entire + task running function in order to safely release the slot upon any error. + """ + + async def wrapper(self, runtime_task): + with LXCSpawner.reserve_slot(self, runtime_task) as slot: + runtime_task.spawner_handle = slot + return await fn(self, runtime_task) + + return wrapper + + +class LXCSpawner(Spawner, SpawnerMixin): + + description = "LXC (container) based spawner" + METHODS = [SpawnMethod.STANDALONE_EXECUTABLE] + slots_cache = {} + + @staticmethod + def run_container_cmd(container, command): + with LXCStreamsFile() as tmp_out, LXCStreamsFile() as tmp_err: + exitcode = container.attach_wait( + lxc.attach_run_command, command, stdout=tmp_out, stderr=tmp_err + ) + return exitcode, tmp_out.read(), tmp_err.read() + + @staticmethod + async def run_container_cmd_async(container, command): + with LXCStreamsFile() as tmp_out, LXCStreamsFile() as tmp_err: + pid = container.attach( + lxc.attach_run_command, command, stdout=tmp_out, stderr=tmp_err + ) + loop = asyncio.get_event_loop() + _, exitcode = await loop.run_in_executor( + None, os.waitpid, pid, os.WUNTRACED + ) + return exitcode, tmp_out.read(), tmp_err.read() + + @contextlib.contextmanager + def reserve_slot(self, runtime_task): + """ + Reserve a free or custom container slot for the runtime task. 
+ + :param runtime_task: runtime task to reserve the slot for + :type runtime_task: :py:class:`avocado.core.task.runtime.RuntimeTask` + :yields: a free slot to use if such was found + :raises: :py:class:`RuntimeError` if no free slot could be found + + This will either use a runtime cache to find a free container slot to + run the task in or use a custom container/slot ID to allow for custom + schedulers to make their own decisions on which containers to run when. + """ + if len(LXCSpawner.slots_cache) == 0: + # TODO: consider whether to provide persistence across runs via external storage + LXCSpawner.slots_cache = { + k: False for k in self.config.get("spawner.lxc.slots") if k + } + # TODO: spawner can look for free containers directly and populate these slots + # for c in lxcontainer.list_containers(as_object=True): ... + + if runtime_task.spawner_handle is not None: + slot = runtime_task.spawner_handle + else: + slots = LXCSpawner.slots_cache + for key, value in slots.items(): + if not value: + slot = key + slots[key] = True + break + else: + raise RuntimeError( + "No free slot available for the task, are " + "you running with more processes than slots?" 
+ ) + + try: + yield slot + finally: + LXCSpawner.slots_cache[slot] = False + + @staticmethod + def is_task_alive(runtime_task): + if runtime_task.spawner_handle is None: + return False + + container = lxc.Container(runtime_task.spawner_handle) + if not container.defined: + LOG.debug(f"Container {runtime_task.spawner_handle} is not defined") + return False + if not container.running: + LOG.debug( + f"Container {runtime_task.spawner_handle} state is " + f"{container.state} instead of RUNNING" + ) + return False + + status, _, _ = LXCSpawner.run_container_cmd( + container, ["pgrep", "-r", "R,S", "-f", "task-run"] + ) + return status == 0 + + @with_slot_reservation + async def spawn_task(self, runtime_task): + self.create_task_output_dir(runtime_task) + task = runtime_task.task + full_module_name = ( + runtime_task.task.runnable.pick_runner_module_from_entry_point_kind( + runtime_task.task.runnable.kind + ) + ) + if full_module_name is None: + msg = f"Could not determine Python module name for runnable with kind {runtime_task.task.runnable.kind}" + raise LXCSpawnerException(msg) + # using the "python" symlink will result in the container default python version + entry_point_args = ["python3", "-m", full_module_name, "task-run"] + entry_point_args.extend(task.get_command_args()) + + dist = self.config.get("spawner.lxc.dist") + release = self.config.get("spawner.lxc.release") + arch = self.config.get("spawner.lxc.arch") + create_hook = self.config.get("spawner.lxc.create_hook") + + if not LXC_AVAILABLE: + msg = "LXC python bindings not available on the system" + runtime_task.status = msg + return False + + container_id = runtime_task.spawner_handle + container = lxc.Container(container_id) + if not container.defined: + # Create the container rootfs + if not container.create( + "download", + lxc.LXC_CREATE_QUIET, + {"dist": dist, "release": release, "arch": arch}, + ): + LOG.error("Failed to create the container rootfs") + return False + + # Customize and deploy test 
data to the container + if create_hook: + customization_args = create_hook.split() + exitcode, output, err = await LXCSpawner.run_container_cmd_async( + container, customization_args + ) + LOG.debug(f"Customization command exited with code {exitcode}") + if exitcode != 0: + LOG.error(f"Error '{err}' on {container_id} with output:\n{output}") + return False + + # Start the container + if not container.running: + if not container.start(): + LOG.error("Failed to start the container") + return False + + # Wait for connectivity + # TODO: The current networking is not good enough to connect to the status server + if not container.get_ips(timeout=30): + LOG.error("Failed to connect to the container") + return False + + # Query some information + LOG.info(f"Container state: {container.state}") + LOG.info(f"Container ID: {container_id} PID: {container.init_pid}") + + exitcode, output, err = await LXCSpawner.run_container_cmd_async( + container, entry_point_args + ) + LOG.debug(f"Command exited with code {exitcode}") + if exitcode != 0: + LOG.error(f"Error '{err}' on {container_id} with output:\n{output}") + return False + + return True + + def create_task_output_dir(self, runtime_task): + output_dir_path = self.task_output_dir(runtime_task) + output_lxc_path = "/tmp/.avocado_task_output_dir" + + os.makedirs(output_dir_path, exist_ok=True) + runtime_task.task.setup_output_dir(output_lxc_path) + + async def wait_task(self, runtime_task): + while True: + if not LXCSpawner.is_task_alive(runtime_task): + return + await asyncio.sleep(0.1) + + async def terminate_task(self, runtime_task): + container = lxc.Container(runtime_task.spawner_handle) + + # Stop the container + if not container.shutdown(30): + LOG.warning("Failed to cleanly shutdown the container, forcing.") + if not container.stop(): + LOG.error("Failed to kill the container") + return False + + # TODO: we can provide extra options to not just stop but destroy the container + # Destroy the container + # if not 
container.destroy(): + # LOG.error("Failed to destroy the container.") + # return False + + @staticmethod + async def check_task_requirements(runtime_task): + """Check the runtime task requirements needed to be able to run""" + # right now, limit the check to the LXC availability + return LXC_AVAILABLE + + @staticmethod + async def is_requirement_in_cache(runtime_task): + return False + + @staticmethod + async def save_requirement_in_cache(runtime_task): + pass + + @staticmethod + async def update_requirement_cache(runtime_task, result): + pass diff --git a/avocado/plugins/spawners/process.py b/avocado/plugins/spawners/process.py index 2d4f464364..3f4cdd8bc1 100644 --- a/avocado/plugins/spawners/process.py +++ b/avocado/plugins/spawners/process.py @@ -50,6 +50,8 @@ async def spawn_task(self, runtime_task): def create_task_output_dir(self, runtime_task): output_dir_path = self.task_output_dir(runtime_task) os.makedirs(output_dir_path, exist_ok=True) + with open(os.path.join(output_dir_path, "debug.log"), mode="ba"): + pass runtime_task.task.setup_output_dir(output_dir_path) @staticmethod diff --git a/avocado/plugins/sysinfo.py b/avocado/plugins/sysinfo.py index 438d4f852b..14ffde142a 100644 --- a/avocado/plugins/sysinfo.py +++ b/avocado/plugins/sysinfo.py @@ -14,10 +14,18 @@ """ System information plugin """ - from avocado.core import sysinfo -from avocado.core.plugin_interfaces import CLICmd, Init, JobPostTests, JobPreTests +from avocado.core.nrunner.runnable import Runnable +from avocado.core.plugin_interfaces import ( + CLICmd, + Init, + JobPostTests, + JobPreTests, + PostTest, + PreTest, +) from avocado.core.settings import settings +from avocado.core.teststatus import STATUSES_NOT_OK from avocado.core.utils.path import prepend_base_path, system_wide_or_base_path from avocado.utils import path @@ -40,6 +48,18 @@ def initialize(self): help_msg=help_msg, ) + help_msg = ( + "Enable or disable sysinfo collection (like hardware " + "details, profiles, etc.) 
for each test" + ) + settings.register_option( + section="sysinfo.collect", + key="per_test", + default=False, + key_type=bool, + help_msg=help_msg, + ) + help_msg = ( "Overall timeout to collect commands, when <=0 no timeout is enforced" ) @@ -177,6 +197,72 @@ def post_tests(self, job): self.sysinfo.end() +class SysInfoTest(PreTest, PostTest): + """Implements the sysinfo pre/post test plugin. + + It will create pre/post-test tasks for collecting system information. + """ + + name = "sysinfo" + description = "Collects system information before/after the test is run." + + def _is_sysinfo_enabled(self, config): + if not ( + config.get("sysinfo.collect.enabled") + and config.get("sysinfo.collect.per_test") + ): + return False + return True + + def pre_test_runnables(self, test_runnable, suite_config=None): + suite_config = suite_config or {} + if not self._is_sysinfo_enabled(suite_config): + return [] + sysinfo_config = sysinfo.gather_collectibles_config(suite_config) + return [ + Runnable.from_avocado_config( + "sysinfo", + "pre", + config=suite_config, + name="pre", + sysinfo=sysinfo_config, + output_dir=test_runnable.output_dir, + ) + ] + + def post_test_runnables(self, test_runnable, suite_config=None): + suite_config = suite_config or {} + if not self._is_sysinfo_enabled(suite_config): + return [] + sysinfo_config = sysinfo.gather_collectibles_config(suite_config) + return [ + ( + Runnable.from_avocado_config( + "sysinfo", + "post", + config=suite_config, + name="post", + sysinfo=sysinfo_config, + test_fail=False, + output_dir=test_runnable.output_dir, + ), + ["pass"], + ), + ( + Runnable.from_avocado_config( + "sysinfo", + "post", + config=suite_config, + name="post", + sysinfo=sysinfo_config, + test_fail=True, + output_dir=test_runnable.output_dir, + ), + [status.lower() for status in STATUSES_NOT_OK], + ), + ] + + class SysInfo(CLICmd): """ diff --git a/avocado/utils/build.py b/avocado/utils/build.py index 32e86be347..0373a27bc3 100644 --- 
a/avocado/utils/build.py +++ b/avocado/utils/build.py @@ -107,7 +107,7 @@ def make( :returns: exit status of the make process """ - kwargs = dict(env=env, ignore_status=ignore_status) + kwargs = {"env": env, "ignore_status": ignore_status} if process_kwargs is not None: kwargs.update(process_kwargs) result = run_make(path, make, extra_args, kwargs) diff --git a/avocado/utils/cloudinit.py b/avocado/utils/cloudinit.py index 533816ab35..3b6e0cb157 100644 --- a/avocado/utils/cloudinit.py +++ b/avocado/utils/cloudinit.py @@ -150,9 +150,7 @@ def do_POST(self): Respond with status 200 if the instance phoned back. """ - path = self.path[1:] - if path[-1] == "/": - path = path[:-1] + path = self.path[1:].rstrip("/") if path == self.server.instance_id: self.server.instance_phoned_back = True self.send_response(200) diff --git a/avocado/utils/cpu.py b/avocado/utils/cpu.py index 4a24d35144..700dce6604 100644 --- a/avocado/utils/cpu.py +++ b/avocado/utils/cpu.py @@ -134,13 +134,13 @@ def get_version(): } cpu_info = _get_info() arch = get_arch() - try: - version_pattern[arch] - except KeyError as Err: - LOG.warning("No pattern string for arch: %s\n Error: %s", arch, Err) - return None + pattern = version_pattern.get(arch) + if not pattern: + LOG.warning("No pattern string for arch: %s", arch) + return "" + for line in cpu_info: - version_out = re.findall(version_pattern[arch], line) + version_out = re.findall(pattern, line) if version_out: return version_out[0].decode("utf-8") return "" diff --git a/avocado/utils/data_structures.py b/avocado/utils/data_structures.py index 2c2f3b8add..009433765e 100644 --- a/avocado/utils/data_structures.py +++ b/avocado/utils/data_structures.py @@ -10,10 +10,12 @@ # See LICENSE for more details. # # Copyright: Red Hat Inc. 
2014 +# IBM, 2023 # # Authors: Ruda Moura # Lucas Meneghel Rodrigues # Harish S +# Maram Srimannarayana Murthy # """ @@ -144,6 +146,42 @@ def comma_separated_ranges_to_list(string): return values +def recursive_compare_dict(dict1, dict2, level="DictKey", diff_btw_dict=None): + """ + Difference between two dictionaries are returned + Dict values can be a dictionary, list and value + + :rtype: list or None + """ + if isinstance(dict1, dict) and isinstance(dict2, dict): + if dict1.keys() != dict2.keys(): + set1 = set(dict1.keys()) + set2 = set(dict2.keys()) + diff_btw_dict.append(f"{level} + {set1-set2} - {set2-set1}") + common_keys = set1 & set2 + else: + common_keys = set(dict1.keys()) + for k in common_keys: + recursive_compare_dict( + dict1[k], dict2[k], level=f"{level}.{k}", diff_btw_dict=diff_btw_dict + ) + return diff_btw_dict + elif isinstance(dict1, list) and isinstance(dict2, list): + if len(dict1) != len(dict2): + diff_btw_dict.append(f"{level} + {len(dict1)} - {len(dict2)}") + common_len = min(len(dict1), len(dict2)) + for i in range(common_len): + recursive_compare_dict( + dict1[i], + dict2[i], + level=f"{level}.{dict1[i]}", + diff_btw_dict=diff_btw_dict, + ) + else: + if dict1 != dict2: + diff_btw_dict.append(f"{level} - dict1 value:{dict1}, dict2 value:{dict2}") + + class Borg: """ diff --git a/avocado/utils/disk.py b/avocado/utils/disk.py index b5933a0728..98060c1afa 100644 --- a/avocado/utils/disk.py +++ b/avocado/utils/disk.py @@ -123,25 +123,55 @@ def get_disks(): return [str(disk["name"]) for disk in json_data["blockdevices"]] -def get_disks_by_id(): +def get_all_disk_paths(): """ - Returns the physical "hard drives" available with wwid + Returns all available disk names and alias on this system - This will get all the sysfs scsi disks by its device id, - irrespective of any platform and device type, a unique key - ie wwid will help work with devices in unpredictable device - name environment. 
+ This will get all the sysfs disks name entries by its device + node name, by-uuid, by-id and by-path, irrespective of any + platform and device type - :returns: a list of scsi ids of real scsi devcies + :returns: a list of all disk path names :rtype: list of str """ - disk_list = [] - for device in os.listdir("/dev/disk/by-id/"): - if os.path.realpath(os.path.join("/dev/disk/by-id/", device)): - disk_list.append(f"/dev/disk/by-id/{device}") + disk_list = abs_path = [] + for path in [ + "/dev", + "/dev/mapper", + "/dev/disk/by-id", + "/dev/disk/by-path", + "/dev/disk/by-uuid", + "/dev/disk/by-partuuid", + "/dev/disk/by-partlabel", + ]: + if os.path.exists(path): + for device in os.listdir(path): + abs_path.append(os.path.join(path, device)) + disk_list.extend(abs_path) return disk_list +def get_absolute_disk_path(device): + """ + Returns absolute device path of given disk + + This will get actual disks path of given device, it can take + node name, by-uuid, by-id and by-path, irrespective of any + platform and device type + + :param device: disk name or disk alias names sda or scsi-xxx + :type device: str + + :returns: the device absolute path name + :rtype: bool + """ + if not os.path.exists(device): + for dev_path in get_all_disk_paths(): + if device == os.path.basename(dev_path): + return dev_path + return device + + def get_available_filesystems(): """ Return a list of all available filesystem types diff --git a/avocado/utils/distro.py b/avocado/utils/distro.py index 98847252ab..f76148d751 100644 --- a/avocado/utils/distro.py +++ b/avocado/utils/distro.py @@ -461,9 +461,9 @@ class UnionTechProbe(Probe): """ CHECK_FILE = "/etc/os-version" - CHECK_FILE_CONTAINS = "uos release" + CHECK_FILE_CONTAINS = "UnionTech OS" CHECK_FILE_DISTRO_NAME = "uos" - CHECK_VERSION_REGEX = re.compile(r"uos release (\d+)\.(\d+).*") + CHECK_VERSION_REGEX = re.compile(r"MinorVersion=(\d+)") #: the complete list of probes that have been registered diff --git a/avocado/utils/download.py 
b/avocado/utils/download.py index 982ca873fa..88517f20cc 100644 --- a/avocado/utils/download.py +++ b/avocado/utils/download.py @@ -62,6 +62,23 @@ def url_open(url, data=None, timeout=5): return result +def _url_download(url, filename, data): + src_file = url_open(url, data=data) + if not src_file: + msg = ( + "Failed to get file. Probably timeout was reached when " + "connecting to the server.\n" + ) + sys.stderr.write(msg) + sys.exit(1) + + try: + with open(filename, "wb") as dest_file: + shutil.copyfileobj(src_file, dest_file) + finally: + src_file.close() + + def url_download(url, filename, data=None, timeout=300): """ Retrieve a file from given url. @@ -72,24 +89,7 @@ def url_download(url, filename, data=None, timeout=300): :param timeout: (optional) default timeout in seconds. :return: `None`. """ - - def download(): - src_file = url_open(url, data=data) - if not src_file: - msg = ( - "Failed to get file. Probably timeout was reached when " - "connecting to the server.\n" - ) - sys.stderr.write(msg) - sys.exit(1) - - try: - with open(filename, "wb") as dest_file: - shutil.copyfileobj(src_file, dest_file) - finally: - src_file.close() - - process = Process(target=download) + process = Process(target=_url_download, args=(url, filename, data)) log.info("Fetching %s -> %s", url, filename) process.start() process.join(timeout) diff --git a/avocado/utils/memory.py b/avocado/utils/memory.py index 367576d19f..d46eed77cc 100644 --- a/avocado/utils/memory.py +++ b/avocado/utils/memory.py @@ -495,14 +495,20 @@ def __getattr__(self, attr): class MemInfo: """ Representation of /proc/meminfo + + There will not be memory information on systems that do not have a + /proc/meminfo file accessible. 
""" def __init__(self): - with open("/proc/meminfo", "r") as meminfo_file: # pylint: disable=W1514 - for line in meminfo_file.readlines(): - name = line.strip().split()[0].strip(":") - safe_name = name.replace("(", "_").replace(")", "_") - setattr(self, safe_name, _MemInfoItem(name)) + try: + with open("/proc/meminfo", "r") as meminfo_file: # pylint: disable=W1514 + for line in meminfo_file.readlines(): + name = line.strip().split()[0].strip(":") + safe_name = name.replace("(", "_").replace(")", "_") + setattr(self, safe_name, _MemInfoItem(name)) + except FileNotFoundError: + pass def __iter__(self): for item in self.__dict__.items(): diff --git a/avocado/utils/multipath.py b/avocado/utils/multipath.py index ce5d7ff775..c1b43a7ac1 100644 --- a/avocado/utils/multipath.py +++ b/avocado/utils/multipath.py @@ -97,6 +97,24 @@ def get_mpath_name(wwid): return process.run(cmd, sudo=True).stdout_text.split()[0] +def get_mpath_from_dm(dm_id): + """ + Get the mpath name for given device mapper id + + :param dev_mapper: Input device mapper dm-x + :return: mpath name like mpathx + :rtype: str + """ + cmd = "multipathd show maps format '%d %n'" + try: + mpaths = process.run(cmd, ignore_status=True, sudo=True, shell=True).stdout_text + except process.CmdError as ex: + raise MPException(f"Multipathd Command Failed : {ex} ") + for mpath in mpaths.splitlines(): + if dm_id in mpath: + return mpath.split()[1] + + def get_multipath_wwids(): """ Get list of multipath wwids. @@ -213,6 +231,32 @@ def get_path_status(disk_path): return (paths["dm_st"], paths["dev_st"], paths["chk_st"]) +def get_mpath_paths_status(wwid): + """ + Return the status of all paths of mpath device. + + :param wwid: wwid or user friedly name of mpath. 
+ Example: mpatha or 360050768108001b3a800000000000296 + :return: Dict in the format of {path: (dm status, dev status, checker status)} + """ + mpath_op = get_multipath_details() + if not mpath_op: + return + wwid_paths = {} + for maps in mpath_op["maps"]: + if maps["name"] == wwid or maps["uuid"] == wwid: + for path_groups in maps["path_groups"]: + for paths in path_groups["paths"]: + wwid_paths[paths["dev"]] = ( + paths["dm_st"], + paths["dev_st"], + paths["chk_st"], + ) + if len(wwid_paths) != 0: + return wwid_paths + return + + def fail_path(path): """ Fail the individual paths. diff --git a/avocado/utils/network/hosts.py b/avocado/utils/network/hosts.py index 4a07c5082e..7d61ad6401 100644 --- a/avocado/utils/network/hosts.py +++ b/avocado/utils/network/hosts.py @@ -48,7 +48,7 @@ class Host: """ def __init__(self, host): - if isinstance(self, Host): + if type(self) == Host: # pylint: disable=C0123 raise TypeError("Host class should not be instantiated") self.host = host diff --git a/avocado/utils/network/interfaces.py b/avocado/utils/network/interfaces.py index 023691cba8..85aa9b0b46 100644 --- a/avocado/utils/network/interfaces.py +++ b/avocado/utils/network/interfaces.py @@ -19,9 +19,12 @@ import json import logging import os +import re import shutil +import subprocess from ipaddress import IPv4Address, ip_interface +from avocado.utils import process from avocado.utils.distro import detect as distro_detect from avocado.utils.network.common import run_command from avocado.utils.network.exceptions import NWException @@ -764,9 +767,9 @@ def validate_ipv4_netmask_format(self, netmask): netmask_list = netmask.split(".") if len(netmask_list) != 4: return False - for octect in netmask_list: - num = int(octect) - if 0 <= num <= 255: + for octet in netmask_list: + num = int(octet) + if not 0 <= num <= 255: return False octet_bin = [format(int(i), "08b") for i in netmask_list] binary_netmask = ("").join(octet_bin) @@ -781,3 +784,65 @@ def 
validate_ipv4_netmask_format(self, netmask): return False first_bit = False return True + + def ping_flood(self, int_name, peer_ip, ping_count): + """ + Function to start ping to remote machine with "-f" [ flood ] option, + on given interface. + + Also this function enables to track the live data to determine the + ping flood failure, in case of failure the program will exit. + + :param int_name: source interface name. + :param peer_ip: Peer IP address (IPv4 or IPv6) + :param ping_count: How many ICMP echo packets to send. + :return : returns True on successful ping flood. + returns False on ping flood failure. + :rtype : boolean + """ + cmd = f"ping -I {int_name} {peer_ip} -c {ping_count} -f " + ping_process = subprocess.Popen( + cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + pattern = r"\.{10}" + while True: + char = ping_process.stdout.read(100) + match = re.search(pattern, char) + if match: + ping_process.terminate() + msg = "ping flood failed to remote machine, Please check the logs" + LOG.debug(msg) + return False + return True + ping_process.stdout.close() + ping_process.wait() + + def get_device_IPI_name(self): + """ + Function to convert IO device name to device_ipi names according to + "/proc/interrupts" context. + Ex: vnic@30000009 to vnic-30000009 + + :return : A converted Network device according to device_ipi name. 
+ :rtype : string + """ + + if self.is_vnic(): + cmd = ( + f"cat /sys/class/net/{self.name}/device/devspec | " + f"awk -F/ '{{print $3}}'" + ) + interface_type = process.run(cmd, shell=True, ignore_status=True).decode( + "utf-8" + ) + cmd = f"echo {interface_type} | sed 's/@/-/' " + interface_type = process.system_output( + cmd, shell=True, ignore_status=True + ).decode("utf-8") + return interface_type + elif self.is_veth(): + return self.name diff --git a/avocado/utils/nvme.py b/avocado/utils/nvme.py index ffff6fafdc..de46778e0c 100644 --- a/avocado/utils/nvme.py +++ b/avocado/utils/nvme.py @@ -22,14 +22,19 @@ """ +import json +import logging import os +import time from avocado.utils import pci, process +LOGGER = logging.getLogger(__name__) -class NvmeError(Exception): + +class NvmeException(Exception): """ - nvme DiskError + Base Exception Class for all exceptions """ @@ -39,15 +44,15 @@ def get_controller_name(pci_addr): :param pci_addr: pci_address of the adapter :rtype: string - :raises: :py:class:`NvmeError` on failure to find pci_address in OS + :raises: :py:class:`NvmeException` on failure to find pci_address in OS """ if pci_addr in pci.get_pci_addresses(): path = f"/sys/bus/pci/devices/{pci_addr}/nvme/" - return os.listdir(path) - raise NvmeError("Unable to list as wrong pci_addr") + return "".join(os.listdir(path)) + raise NvmeException("Unable to list as wrong pci_addr") -def get_number_of_ns_supported(controller_name): +def get_max_ns_supported(controller_name): """ Returns the number of namespaces supported for the nvme adapter @@ -84,9 +89,301 @@ def get_controller_id(controll_name): :param controller_name: Name of the controller eg: nvme0 :rtype: string """ - cmd = f"nvme list-ctrl /dev/{controll_name}" - output = process.system_output(cmd, shell=True, ignore_status=True).decode("utf-8") + cmd = f"nvme id-ctrl /dev/{controll_name}" + output = process.run(cmd, shell=True, sudo=True, ignore_status=True).stdout_text for line in output.splitlines(): - 
if "0]" in line: - return line.split(":")[-1] + if "cntlid" in line: + return line.split(":")[-1].strip() return "" + + +def get_current_ns_ids(controller_name): + """ + Returns the list of namespaces in the nvme controller + + :param controller_name: Name of the nvme controller like nvme0, nvme1 + :rtyp: list + """ + cmd = f"nvme list-ns /dev/{controller_name}" + namespaces = [] + output = process.run(cmd, shell=True, sudo=True, ignore_status=True).stdout_text + for line in output.splitlines(): + namespaces.append(int(line.split()[1].split("]")[0]) + 1) + return namespaces + + +def get_current_ns_list(controller_name): + """ + Returns the list of namespaces in the nvme controller + + :param controller_name: Name of the nvme controller like nvme0, nvme1 + :rtyp: list + """ + namespace_list = [] + namespaces_ids = get_current_ns_ids(controller_name) + for ns_id in namespaces_ids: + namespace_list.append(f"/dev/{controller_name}n{ns_id}") + return namespace_list + + +def get_block_size(controller_name): + """ + Returns the block size of the namespace. + If not found, return defaults to 4k. + + :param namespace: Name of the namespace like /dev/nvme0n1 etc.. 
+ :rtype: Integer + """ + namespaces = get_current_ns_list(controller_name) + if namespaces: + namespace = namespaces[0] + cmd = f"nvme id-ns /dev/{namespace}" + out = process.run(cmd, shell=True, ignore_status=True) + for line in out.splitlines: + if "in use" in line: + return pow(2, int(line.split()[4].split(":")[-1])) + return 4096 + + +def delete_ns(controller_name, ns_id): + """ + Deletes the specified namespace on the controller + + :param controller_name: Nvme controller name to which namespace belongs + :param ns_id: namespace id to be deleted + """ + cont_id = get_controller_id(controller_name) + detach_ns(controller_name, ns_id, cont_id) + cmd = f"nvme delete-ns /dev/{controller_name} -n {ns_id}" + if process.system(cmd, shell=True, ignore_status=True): + raise NvmeException(f"/dev/{controller_name}n{ns_id} delete failed") + if is_ns_exists(controller_name, ns_id): + raise NvmeException("namespace still listed even after deleted") + + +def delete_all_ns(controller_name): + """ + Deletes all the name spaces available on the given nvme controller + + :param controller_name: Nvme controller name eg : nvme0, nvme1 etc.. + """ + namespaces_ids = get_current_ns_ids(controller_name) + for ns_id in namespaces_ids[::-1]: + delete_ns(controller_name, ns_id) + time.sleep(5) + + +def is_ns_exists(controller_name, ns_id): + """ + Returns if that particular namespace exists on the controller or not + + :param controller_name: name of the controller on which we want to check + ns existence + + :returns: True if exists else False + :rtype: boolean + """ + ns_list = get_current_ns_ids(controller_name) + if ns_id in ns_list: + return True + return False + + +def get_lba(namespace): + """ + Returns LBA of the namespace. If not found, return defaults to 0. + + :param namespace: nvme namespace like /dev/nvme0n1, /dev/nvme0n2 etc.. 
+ :rtype: Integer + """ + if namespace: + cmd = f"nvme id-ns {namespace}" + out = process.run(cmd, shell=True, ignore_status=True).stdout_text + for line in out.splitlines(): + if "in use" in line: + return int(line.split()[1]) + return 0 + + +def ns_rescan(controller_name): + """ + re-scans all the names spaces on the given controller + + :param controller_name: controller name on which re-scan is applied + """ + cmd = f"nvme ns-rescan {controller_name}" + try: + process.run(cmd, shell=True, ignore_status=True) + except process.CmdError as detail: + LOGGER.debug(detail) + + +def detach_ns(controller_name, ns_id, cont_id): + """ + detach the namespace_id to specified controller + + :param ns_id: namespace ID + :param controller_name: controller name + :param cont_id: controller_ID + """ + cmd = f"nvme detach-ns /dev/{controller_name} --namespace-id={ns_id} -controllers={cont_id}" + if not process.run(cmd, shell=True, ignore_status=True): + raise NvmeException("detach command failed") + ns_rescan(controller_name) + time.sleep(3) + if is_ns_exists(controller_name, ns_id): + raise NvmeException("namespace dettached but still listing") + + +def attach_ns(ns_id, controller_name, cont_id): + """ + attach the namespace_id to specified controller + + :param ns_id: namespace ID + :param controller_name: controller name + :param cont_id: controller_ID + """ + cmd = f"nvme attach-ns /dev/{controller_name} --namespace-id={ns_id} -controllers={cont_id}" + if not process.run(cmd, shell=True, ignore_status=True): + raise NvmeException("namespaces attach command failed") + ns_rescan(controller_name) + if not is_ns_exists(controller_name, ns_id): + raise NvmeException("namespaces attached but not listing") + + +def create_full_capacity_ns(controller_name): + """ + Creates one namespace with full capacity + + :param controller_name: name of the controller like nvme0/nvme1 etc.. 
+ """ + ns_size = get_total_capacity(controller_name) // get_block_size(controller_name) + if get_current_ns_list(controller_name): + raise NvmeException("ns already exist, delete it before creating ") + create_one_ns("1", controller_name, ns_size) + + +def create_one_ns(ns_id, controller_name, ns_size): + """ + creates a single namespaces with given size and controller_id + + :param ns_id: Namespace ID + :param controller_name: name of the controller like nvme0/nvme1 etc.. + :param ns_size: Size of the namespace that is going to be created + """ + cmd = f"nvme create-ns /dev/{controller_name} --nsze={ns_size} --ncap={ns_size} --flbas=0 --dps=0" + if process.system(cmd, shell=True, ignore_status=True): + raise NvmeException(f"namespace create command failed {cmd}") + cont_id = get_controller_id(controller_name) + attach_ns(ns_id, controller_name, cont_id) + + +def create_max_ns(controller_name, force): + """ + Creates maximum number of namespaces, with equal capacity + + :param controller_name: name of the controller like nvme0/nvme1 etc.. + :param force: if wants to create the namespace foce, then pass force=True + """ + if get_current_ns_list(controller_name) and not force: + raise NvmeException("ns already exist, cannot create max_ns") + max_ns = int(get_max_ns_supported(controller_name)) + ns_size = get_equal_ns_size(controller_name, max_ns) + for ns_id in range(1, (max_ns + 1)): + create_one_ns(str(ns_id), controller_name, ns_size) + + +def get_equal_ns_size(controller_name, ns_count): + """ + It calculate and return the size of a namespace when want to create + more than one namespace with equal sizes + + :param controller_name: name of the controller like nvme0/nvme1 etc... 
+ :param ns_count: Number of namespaces you want to create with equal sixe + it should be less thans or eaqual to max ns supported + on the controller + :rtype: integer + """ + existing_ns_list = len(get_current_ns_ids(controller_name)) + max_ns = get_max_ns_supported(controller_name) + if ns_count > (max_ns - existing_ns_list): + raise NvmeException("required ns count is greater than max supported") + free_space = get_free_space(controller_name) + if free_space < 1000: + raise NvmeException("available free space is less than 1GB") + return int(((60 * (free_space // 4096)) // 100) // ns_count) + + +def get_free_space(controller_name): + """ + Returns the total capacity of the nvme adapter + + :param controller_name: Name of the controller eg: nvme0 + :rtype: integer + """ + cmd = f"nvme id-ctrl /dev/{controller_name}" + out = process.run(cmd, ignore_status=True, sudo=True, shell=True).stdout_text + for line in out.splitlines(): + if line.split(":")[0].strip() == "unvmcap": + return int(line.split(":")[-1].strip()) + return 0 + + +def create_namespaces(controller_name, ns_count): + """ + creates eaqual n number of namespaces on the specified controller + + :param controller_name: name of the controller like nvme0 + :param ns_count: number of namespaces to be created + """ + namespaces = get_current_ns_ids(controller_name) + if namespaces: + delete_all_ns(controller_name) + blk_size = get_total_capacity(controller_name) // get_block_size(controller_name) + ns_size = blk_size // (ns_count + 1) + for ns_id in range(1, ns_count + 1): + create_one_ns(ns_id, controller_name, ns_size) + + +def get_ns_status(controller_name, ns_id): + """ + Returns the status of namespaces on the specified controller + + :param controller_name: name of the controller like nvme0 + :param ns_id: ID of namespace for which we need the status + + :rtype: list + """ + stat = [] + cmd = f"nvme show-topology /dev/{controller_name} -o json" + data = process.run(cmd, ignore_status=True, sudo=True, 
shell=True).stdout_text + json_data = json.loads(data) + for data in json_data: + for subsystem in data["Subsystems"]: + for namespace in subsystem["Namespaces"]: + nsid = namespace["NSID"] + for paths in namespace["Paths"]: + if nsid == ns_id and paths["Name"] == controller_name: + stat.extend([paths["State"], paths["ANAState"]]) + return stat + + +def get_nslist_with_pci(pci_address): + """ + Fetches and returns list of namespaces for specified pci_address + + :param pci_address: pci_address of any nvme adapter + + :rtype: list + """ + ns_list = [] + cmd = "nvme show-topology -o json" + data = process.run(cmd, ignore_status=True, sudo=True, shell=True).stdout_text + json_data = json.loads(data) + for data in json_data: + for subsystem in data["Subsystems"]: + for namespace in subsystem["Namespaces"]: + for paths in namespace["Paths"]: + if paths["Address"] == pci_address: + ns_list.append(namespace["NSID"]) + return ns_list diff --git a/avocado/utils/process.py b/avocado/utils/process.py index 9bc6e1742a..b052ad6def 100644 --- a/avocado/utils/process.py +++ b/avocado/utils/process.py @@ -102,7 +102,10 @@ def get_capabilities(pid=None): """ if pid is None: pid = os.getpid() - result = run(f"getpcaps {int(pid)}", ignore_status=True) + try: + result = run(f"getpcaps {int(pid)}", ignore_status=True) + except FileNotFoundError: + return [] if result.exit_status != 0: return [] if result.stderr_text.startswith("Capabilities "): @@ -151,7 +154,7 @@ def safe_kill(pid, signal): # pylint: disable=W0621 :param signal: Signal number. 
""" if get_owner_id(int(pid)) == 0: - kill_cmd = f"kill -{int(int(signal))} {int(int(pid))}" + kill_cmd = f"kill -{int(signal)} {int(pid)}" try: run(kill_cmd, sudo=True) return True @@ -301,7 +304,7 @@ def process_in_ptree_is_defunct(ppid): except CmdError: # Process doesn't exist return True for pid in pids: - cmd = f"ps --no-headers -o cmd {int(int(pid))}" + cmd = f"ps --no-headers -o cmd {int(pid)}" proc_name = system_output(cmd, ignore_status=True, verbose=False) if "" in proc_name: defunct = True @@ -792,7 +795,7 @@ def send_signal(self, sig): pids = get_children_pids(self.get_pid()) pids.append(self.get_pid()) for pid in pids: - kill_cmd = f"kill -{int(int(sig))} {int(pid)}" + kill_cmd = f"kill -{int(sig)} {int(pid)}" with contextlib.suppress(Exception): run(kill_cmd, sudo=True) else: diff --git a/avocado/utils/software_manager/main.py b/avocado/utils/software_manager/main.py index 7a97f22b50..e8344bcfcb 100644 --- a/avocado/utils/software_manager/main.py +++ b/avocado/utils/software_manager/main.py @@ -88,6 +88,13 @@ def main(): logging.basicConfig(level=logging.INFO, format="%(message)s") software_manager = SoftwareManager() + if not software_manager.is_capable(): + log.error( + "ERROR: There is no backend implementation for this system. No " + "action will be performed." + ) + return exit_codes.UTILITY_FAIL + if args: action = args[0] args = " ".join(args[1:]) diff --git a/avocado/utils/ssh.py b/avocado/utils/ssh.py index 3f068042c4..74b7bf4476 100644 --- a/avocado/utils/ssh.py +++ b/avocado/utils/ssh.py @@ -223,11 +223,13 @@ def cmd(self, command, ignore_status=True, timeout=None): :returns: The command result object. :rtype: A :class:`avocado.utils.process.CmdResult` instance. 
""" + if timeout: + command_argument = f"timeout --foreground {timeout} {command}" + else: + command_argument = command try: return process.run( - self.get_raw_ssh_command(command), - ignore_status=ignore_status, - timeout=timeout, + self.get_raw_ssh_command(command_argument), ignore_status=ignore_status ) except process.CmdError as exc: if exc.result.exit_status == 255: diff --git a/avocado/utils/wait.py b/avocado/utils/wait.py index b123c4370e..33fc5903b0 100644 --- a/avocado/utils/wait.py +++ b/avocado/utils/wait.py @@ -18,10 +18,9 @@ def wait_for(func, timeout, first=0.0, step=1.0, text=None, args=None, kwargs=No :param args: Positional arguments to func :param kwargs: Keyword arguments to func """ - if args is None: - args = [] - if kwargs is None: - kwargs = {} + args = args or [] + kwargs = kwargs or {} + start_time = time.monotonic() end_time = start_time + timeout diff --git a/contrib/containers/ci/selftests/check-copr-rpm-version.docker b/contrib/containers/ci/selftests/check-copr-rpm-version.docker index 3085c9f8f2..84adae4fa2 100644 --- a/contrib/containers/ci/selftests/check-copr-rpm-version.docker +++ b/contrib/containers/ci/selftests/check-copr-rpm-version.docker @@ -1,5 +1,5 @@ # This container is used in selftests/pre_release/tests/check-copr-rpm-version.sh -FROM fedora:36 +FROM fedora:38 LABEL description "Fedora image used on COPR RPM version check" RUN dnf -y module disable avocado:latest RUN dnf -y install 'dnf-command(copr)' diff --git a/contrib/containers/ci/selftests/fedora-36.docker b/contrib/containers/ci/selftests/fedora-38.docker similarity index 62% rename from contrib/containers/ci/selftests/fedora-36.docker rename to contrib/containers/ci/selftests/fedora-38.docker index 8e7b69b2c8..2072823d75 100644 --- a/contrib/containers/ci/selftests/fedora-36.docker +++ b/contrib/containers/ci/selftests/fedora-38.docker @@ -1,10 +1,7 @@ -FROM fedora:36 +FROM fedora:38 LABEL description "Fedora image used on integration checks" RUN dnf -y module 
enable avocado:latest RUN dnf -y install dnf-plugins-core git findutils make which RUN dnf -y install diffutils python3-isort python3-enchant python3-pylint python3-autopep8 python3-black RUN dnf -y builddep python-avocado -# These should be in Avocado 98.0 packages, but until the module -# has those versions, we need to manually install them here -RUN dnf -y install python3-elementpath python3-xmlschema RUN dnf -y clean all diff --git a/docs/source/blueprints/BP005.rst b/docs/source/blueprints/BP005.rst index a817502f3e..187c77b69b 100644 --- a/docs/source/blueprints/BP005.rst +++ b/docs/source/blueprints/BP005.rst @@ -24,7 +24,7 @@ test writers efforts in the features to be tested, and away from debugging and troubleshooting those libraries. While this repository initially targets Avocado users writing tests, it should be generic enough for consumption by third-party developers outside of the Avocado community. It presents a glimpse -of the infrastructure that will guarantee quality criterias and will ease the +of the infrastructure that will guarantee quality criteria and will ease the onboarding and maintenance of such libraries. Besides the plan, with the migration steps for the current libraries, this diff --git a/docs/source/guides/contributor/chapters/how.rst b/docs/source/guides/contributor/chapters/how.rst index bc6929ade9..d83f803cfa 100644 --- a/docs/source/guides/contributor/chapters/how.rst +++ b/docs/source/guides/contributor/chapters/how.rst @@ -128,13 +128,7 @@ the code based on some review and, if not, why. Feel free to disagree with the reviewer, they probably have different use cases and opinions, which is expected. Try describing yours and suggest other solutions, if necessary. -New versions of your code should not be force-updated (unless explicitly -requested by the code reviewer). 
Instead, you should: - -- Create a new branch out of your previous branch:: - - $ git checkout my_new_local_branch - $ git checkout -b my_new_local_branch_v2 +Then, proceed to make the changes. This is a typical workflow: - Code, and amend the commit(s) and/or create new commits. If you have more than one commit in the PR, you will probably need to rebase @@ -149,13 +143,13 @@ requested by the code reviewer). Instead, you should: - Push your changes:: - $ git push origin my_new_local_branch_v2 - -- Create a new Pull Request for this new branch. In the Pull Request - description, point the previous Pull Request and the changes the current Pull - Request introduced when compared to the previous Pull Request(s). + $ git push --force origin my_new_local_branch -- Close the previous Pull Request on GitHub. +Please communicate to the reviewers what the summary of changes are. +Also, make use of GitHub's features to ease the reviewers' life, such +as marking comments as "resolved". Reviewers should make use of +GitHub's "compare" feature to more easily verify the changes since the +last iteration. After your PR gets merged, you can sync the master branch on your local repository propagate the sync to the master branch in your fork repository on diff --git a/docs/source/guides/contributor/chapters/rfc.rst b/docs/source/guides/contributor/chapters/rfc.rst index 1ea83b19a6..d8527f6e38 100644 --- a/docs/source/guides/contributor/chapters/rfc.rst +++ b/docs/source/guides/contributor/chapters/rfc.rst @@ -6,8 +6,8 @@ What is a RFC? .. warning:: TODO: Better describe our RFC model here. -Submiting a RFC ---------------- +Submitting a RFC +---------------- .. warning:: TODO: Better describe our RFC model here. 
diff --git a/docs/source/guides/contributor/chapters/tips.rst b/docs/source/guides/contributor/chapters/tips.rst index b2202a9fa2..bdf4abd29d 100644 --- a/docs/source/guides/contributor/chapters/tips.rst +++ b/docs/source/guides/contributor/chapters/tips.rst @@ -28,10 +28,10 @@ During the execution look for:: .. note:: If you are running a test with Avocado, and want to measure the duration - of a method/function, make sure to enable the `debug` logging stream. - Example:: + of a method/function, make sure to enable the `avocado.utils.debug` + logging stream. Example:: - avocado --show avocado.app.debug run examples/tests/assets.py + avocado --show avocado.utils.debug run examples/tests/assets.py Line-profiler ------------- diff --git a/docs/source/guides/user/chapters/dependencies.rst b/docs/source/guides/user/chapters/dependencies.rst index e9ec9cdc3d..ed454efec2 100644 --- a/docs/source/guides/user/chapters/dependencies.rst +++ b/docs/source/guides/user/chapters/dependencies.rst @@ -42,6 +42,15 @@ with `avocado cache list`. If such a change is made to the environment, it's recommended to clear the dependencies cache with `$avocado cache clear`. +Dependency logs +--------------- + +Each dependency will create its own log directory where you can find logs related to +this dependence. Dependencies logs related to one job are stored in +`{avocado_dir}/job-results/{job}/dependencies` and for each dependence logs the symlink +to correct test logs directory is created. 
Therefore, if your test has dependencies, +you can find dependency logs in `{avocado_dir}/job-results/{job}/test-results/{test_id}/dependencies` + Defining a test dependency --------------------------- diff --git a/docs/source/guides/user/chapters/introduction.rst b/docs/source/guides/user/chapters/introduction.rst index 66b4215db7..f43ae2aba8 100644 --- a/docs/source/guides/user/chapters/introduction.rst +++ b/docs/source/guides/user/chapters/introduction.rst @@ -392,9 +392,9 @@ Using the option --show Probably, you frequently want to look straight at the job log, without switching screens or having to "tail" the job log. -In order to do that, you can use ``avocado --show=test run ...``:: +In order to do that, you can use ``avocado --show=job run ...``:: - $ avocado --show=test run examples/tests/sleeptest.py + $ avocado --show=job run examples/tests/sleeptest.py ... Job ID: f9ea1742134e5352dec82335af584d1f151d4b85 @@ -471,11 +471,11 @@ That's basically the only rule, and a sane one, that you need to follow. terminals, basically pipes the colored output to ``less`` to simplify browsing of the produced output. You an enable it with ``--enable-paginator``. +.. _sysinfo-collection: + Sysinfo collection ------------------ -.. note:: This feature is not fully supported on nrunner runner yet. - Avocado comes with a ``sysinfo`` plugin, which automatically gathers some system information per each job or even between tests. This is very useful when later we want to know what caused the test's failure. This system diff --git a/docs/source/guides/user/chapters/logging.rst b/docs/source/guides/user/chapters/logging.rst index 40707237fb..288c3becf1 100644 --- a/docs/source/guides/user/chapters/logging.rst +++ b/docs/source/guides/user/chapters/logging.rst @@ -1,7 +1,8 @@ Avocado logging system ====================== -This section describes the logging system used in Avocado. +This section describes basic of the logging system used in Avocado. 
For more +detailed information about avocado logging system, please go to :ref:`advanced-logging`. Tweaking the UI --------------- @@ -19,7 +20,7 @@ streams) are listed below: :app: The text based UI (avocado.app) :test: Output of the executed tests (avocado.test, "") -:debug: Messages useful to debug the Avocado Framework (avocado.app.debug) +:job: Avocado job related logs (avocado.job) :early: Early logging before the logging system is set. It includes the test output and lots of output produced by used libraries. ("", avocado.test) @@ -39,9 +40,26 @@ directory by running:: $ avocado run --store-logging-stream STREAM[:LEVEL][,STREAM[:LEVEL][,...] -This will produce ``$STREAM.$LEVEL`` files per each (unique) entry in the test -results directory. +This will produce ``$STREAM.$LEVEL`` files per each (unique) entry in two places. +One in each test result directory where the stream is separated by test and one in +job log directory where the stream will be gathered from all tests into one place. +This might be useful when you don't use parallel run and you want to have logs +from all tests in one place. .. note:: You have to specify separated logging streams. You can't use the built-in streams in this function. + +Avocado log files +----------------- + +Avocado use three main files for logging information on different levels. +All of them are stored in ``{avocado_dir}/job-results/{job}/``: + +:job.log: Logs related to job run under avocado.job namespace. +:full.log: All generated logs in one job run. It contains all logs from + avocado internal to logs from all tests. If you run your tests + in parallel the logs might be overlap since the logs are not + sorted and they are stored at the time when they have been created. +:debug.log: Logs generated by one specific test and they are separated from other + tests logs. 
Logs are stored in ``{avocado_dir}/job-results/{job}/test-results/{test_id}/debug.log``
diff --git a/docs/source/guides/writer/chapters/logging.rst b/docs/source/guides/writer/chapters/logging.rst
index 4a364b95dd..ec05b4003a 100644
--- a/docs/source/guides/writer/chapters/logging.rst
+++ b/docs/source/guides/writer/chapters/logging.rst
@@ -1,3 +1,5 @@
+.. _advanced-logging:
+
 Advanced logging capabilities
 =============================
 
@@ -10,36 +12,51 @@ clear stages on a single test:
 
 .. literalinclude:: ../../../../../examples/tests/logging_streams.py
 
-.. note:: TODO: Improve how we show the logs on the console.
+Currently Avocado will store any log information that has been generated
+during a job run into specific log files based on the used logging namespace
+and the process where the logs have been generated.
+
+Avocado generates three types of logs, each of them with a different purpose:
+
+**1. job.log**
+
+The `job.log` file is generated for each avocado job and it is stored directly
+in the `log_dir` directory. This file contains only logs from the `avocado.job`
+namespace and holds information about job configuration and test statuses
+which were run inside the job.
+The format of the logs is::
+